diff --git a/OracleAccessManagement/kubernetes/README.md b/OracleAccessManagement/kubernetes/README.md
old mode 100644
new mode 100755
diff --git a/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard.json b/OracleAccessManagement/kubernetes/monitoring-service/config/weblogic-server-dashboard.json
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/README.md b/OracleIdentityGovernance/kubernetes/README.md
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/common/create-domain-job.sh b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/common/create-domain-job.sh
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/common/createFMWDomain.py b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/common/createFMWDomain.py
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/create-domain-inputs.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/create-domain-inputs.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/create-domain-job-template.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/create-domain-job-template.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/delete-domain-job-template.yaml b/OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/delete-domain-job-template.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/design-console-ingress/Chart.yaml b/OracleIdentityGovernance/kubernetes/design-console-ingress/Chart.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/design-console-ingress/README.md b/OracleIdentityGovernance/kubernetes/design-console-ingress/README.md
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/design-console-ingress/templates/nginx-ingress-k8s1.19.yaml b/OracleIdentityGovernance/kubernetes/design-console-ingress/templates/nginx-ingress-k8s1.19.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/design-console-ingress/templates/nginx-ingress.yaml b/OracleIdentityGovernance/kubernetes/design-console-ingress/templates/nginx-ingress.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/design-console-ingress/values.yaml b/OracleIdentityGovernance/kubernetes/design-console-ingress/values.yaml
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json b/OracleIdentityGovernance/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json
old mode 100644
new mode 100755
diff --git a/OracleIdentityGovernance/kubernetes/weblogic_dashboard.json b/OracleIdentityGovernance/kubernetes/weblogic_dashboard.json
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/README.md b/OracleInternetDirectory/kubernetes/README.md
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/.helmignore b/OracleInternetDirectory/kubernetes/helm/.helmignore
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/Chart.yaml b/OracleInternetDirectory/kubernetes/helm/oid/Chart.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/_helpers.tpl b/OracleInternetDirectory/kubernetes/helm/oid/templates/_helpers.tpl
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/cluster-rolebinding.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/cluster-rolebinding.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/clusterrole.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/clusterrole.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/ingress-nginx.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/ingress-nginx.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/ingress-voyager.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/ingress-voyager.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/pod.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/pod.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/pods.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/pods.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/pv.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/pv.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/pvc.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/pvc.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/rolebinding.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/rolebinding.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/secret.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/secret.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/service-lbr-ldap.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/service-lbr-ldap.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/service.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/service.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/serviceaccount.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/serviceaccount.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/services.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/services.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/templates/tls-secret.yaml b/OracleInternetDirectory/kubernetes/helm/oid/templates/tls-secret.yaml
old mode 100644
new mode 100755
diff --git a/OracleInternetDirectory/kubernetes/helm/oid/values.yaml b/OracleInternetDirectory/kubernetes/helm/oid/values.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/README.md b/OracleUnifiedDirectory/kubernetes/README.md
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/.gitignore b/OracleUnifiedDirectory/kubernetes/helm/.gitignore
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/README.md b/OracleUnifiedDirectory/kubernetes/helm/README.md
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/.helmignore b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/.helmignore
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/Chart.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/Chart.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/README.md b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/README.md
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/NOTES.txt b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/NOTES.txt
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/_helpers.tpl b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/_helpers.tpl
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/cluster-rolebinding.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/cluster-rolebinding.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/clusterrole.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/clusterrole.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_elasticsearch-svc.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_elasticsearch-svc.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_elasticsearch.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_elasticsearch.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_kibana.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_kibana.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_logstash-configMap.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_logstash-configMap.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_logstash.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_logstash.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_pv-elasticsearch.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_pv-elasticsearch.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_storageclass.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/elk_storageclass.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/ingress-nginx-19-admin.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/ingress-nginx-19-admin.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/ingress-nginx-19-http.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/ingress-nginx-19-http.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/ingress-nginx-admin.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/ingress-nginx-admin.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/ingress-nginx-http.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/ingress-nginx-http.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/oud-cron-job.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/oud-cron-job.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pod.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pod.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pods.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pods.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pv-config.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pv-config.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pv-job.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pv-job.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pv.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pv.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pvc-config.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pvc-config.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pvc-job.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pvc-job.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pvc.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/pvc.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/role.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/role.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/rolebinding.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/rolebinding.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/secret.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/secret.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-admin.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-admin.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-all.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-all.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-http.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-http.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-lbr-admin.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-lbr-admin.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-lbr-http.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-lbr-http.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-lbr-ldap.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-lbr-ldap.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-ldap.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/service-ldap.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/serviceaccount.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/serviceaccount.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/services-admin.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/services-admin.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/services-all.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/services-all.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/services-http.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/services-http.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/services-ldap.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/services-ldap.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/svc-account.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/svc-account.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/test/oud-ds-rs-tests.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/test/oud-ds-rs-tests.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/tls-secret.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/templates/tls-secret.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/values.yaml b/OracleUnifiedDirectory/kubernetes/helm/oud-ds-rs/values.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/samples/oud-ds-rs_nginx-ingress_values.yaml b/OracleUnifiedDirectory/kubernetes/helm/samples/oud-ds-rs_nginx-ingress_values.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/helm/samples/oud-ds-rs_values.yaml b/OracleUnifiedDirectory/kubernetes/helm/samples/oud-ds-rs_values.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/samples/oud-dir-pod.yaml b/OracleUnifiedDirectory/kubernetes/samples/oud-dir-pod.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/samples/oud-dir-svc.yaml b/OracleUnifiedDirectory/kubernetes/samples/oud-dir-svc.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/samples/oud-ds-plus-rs-remote-oud-svcs.yaml b/OracleUnifiedDirectory/kubernetes/samples/oud-ds-plus-rs-remote-oud-svcs.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/samples/oud-ds-plus-rs-svc.yaml b/OracleUnifiedDirectory/kubernetes/samples/oud-ds-plus-rs-svc.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/samples/oud-ds_proxy-svc.yaml b/OracleUnifiedDirectory/kubernetes/samples/oud-ds_proxy-svc.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/samples/oud-ds_rs_ds-svc.yaml b/OracleUnifiedDirectory/kubernetes/samples/oud-ds_rs_ds-svc.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/samples/oudns.yaml b/OracleUnifiedDirectory/kubernetes/samples/oudns.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/samples/persistent-volume.yaml b/OracleUnifiedDirectory/kubernetes/samples/persistent-volume.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/samples/secrets.yaml b/OracleUnifiedDirectory/kubernetes/samples/secrets.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/samples/stress-oud-dir-svc.yaml b/OracleUnifiedDirectory/kubernetes/samples/stress-oud-dir-svc.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectory/kubernetes/samples/stress-oud-ds-plus-rs-svc.yaml b/OracleUnifiedDirectory/kubernetes/samples/stress-oud-ds-plus-rs-svc.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/README.md b/OracleUnifiedDirectorySM/kubernetes/README.md
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/README.md b/OracleUnifiedDirectorySM/kubernetes/helm/README.md
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/.helmignore b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/.helmignore
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/Chart.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/Chart.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/README.md b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/README.md
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/_helpers.tpl b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/_helpers.tpl
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/cluster-rolebinding.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/cluster-rolebinding.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/clusterrole.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/clusterrole.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_elasticsearch-svc.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_elasticsearch-svc.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_elasticsearch.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_elasticsearch.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_kibana.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_kibana.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_logstash-configMap.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_logstash-configMap.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_logstash.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_logstash.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_pv-elasticsearch.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_pv-elasticsearch.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_storageclass.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/elk_storageclass.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/ingress-nginx-19.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/ingress-nginx-19.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/ingress-nginx.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/ingress-nginx.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/pods.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/pods.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/pv.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/pv.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/pvc.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/pvc.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/role.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/role.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/rolebinding.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/rolebinding.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/secret.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/secret.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/service-lbr.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/service-lbr.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/serviceaccount.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/serviceaccount.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/services.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/services.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/tls-secret.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/templates/tls-secret.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/values.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/oudsm/values.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/samples/oudsm_nginx-ingress_values.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/samples/oudsm_nginx-ingress_values.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/helm/samples/oudsm_values.yaml b/OracleUnifiedDirectorySM/kubernetes/helm/samples/oudsm_values.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/samples/oudsm-deployment.yaml b/OracleUnifiedDirectorySM/kubernetes/samples/oudsm-deployment.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/samples/oudsm-pod.yaml b/OracleUnifiedDirectorySM/kubernetes/samples/oudsm-pod.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/samples/oudsmns.yaml b/OracleUnifiedDirectorySM/kubernetes/samples/oudsmns.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/samples/persistent-volume.yaml b/OracleUnifiedDirectorySM/kubernetes/samples/persistent-volume.yaml
old mode 100644
new mode 100755
diff --git a/OracleUnifiedDirectorySM/kubernetes/samples/secrets.yaml b/OracleUnifiedDirectorySM/kubernetes/samples/secrets.yaml
old mode 100644
new mode 100755
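All of the entries above are pure permission flips from mode 100644 (non-executable) to 100755 (executable); no file content changes. Below is a minimal sketch of how such a flip is typically produced and then reviewed in bulk, assuming a POSIX shell and using one of the scripts from the list above as the example path; `git update-index --chmod=+x` is the variant that flips the bit in the index without touching the working tree.

```bash
# Mark a script executable and stage the resulting 100644 -> 100755 mode change.
chmod +x OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/common/create-domain-job.sh
git add OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/common/create-domain-job.sh

# Alternative: flip the executable bit in the index only, leaving the working tree untouched.
git update-index --chmod=+x OracleIdentityGovernance/kubernetes/create-oim-domain/domain-home-on-pv/common/create-domain-job.sh

# Review mode-only changes compactly instead of reading every diff header.
git diff --cached --summary | grep 'mode change'
```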
diff --git a/docs-source/content/oudsm/troubleshooting/_index.md b/docs-source/content/oudsm/troubleshooting/_index.md
index 3465d8234..a505a4123 100644
--- a/docs-source/content/oudsm/troubleshooting/_index.md
+++ b/docs-source/content/oudsm/troubleshooting/_index.md
@@ -1,7 +1,7 @@
 +++
 title = "Troubleshooting"
-weight = 8
-pre = "8. "
+weight = 9
+pre = "9. "
 description = "How to Troubleshoot issues."
 +++
 1. [Check the status of a namespace](#check-the-status-of-a-namespace)
diff --git a/docs/22.2.1/404.html b/docs/22.2.1/404.html
new file mode 100644
index 000000000..d67b3df81
--- /dev/null
+++ b/docs/22.2.1/404.html
@@ -0,0 +1,57 @@
[57 added lines: generated Hugo 404 page for the 22.2.1 docs; visible text: "404 Page not found", "Error", "Woops. Looks like this page doesn't exist ¯\_(ツ)_/¯.", "Go to homepage", "Page not found!"]
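The only hand-edited change in this section is the front matter of docs-source/content/oudsm/troubleshooting/_index.md above: the Troubleshooting chapter moves from menu position 8 to 9, and the `pre` number shown in the navigation is renumbered to match the new `weight`. Below is a small sketch for checking that the two fields stay in step across the oudsm chapters after such a renumbering; it assumes the sibling chapters keep their `weight` and `pre` keys at the start of a line, as this file does.

```bash
# List weight and pre for every oudsm chapter so menu order and numbering can be checked by eye.
grep -H -E '^(weight|pre) ' docs-source/content/oudsm/*/_index.md

# A duplicate weight value here would mean two chapters compete for the same menu position.
grep -h '^weight' docs-source/content/oudsm/*/_index.md | sort | uniq -d
```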
diff --git a/docs/22.2.1/categories/index.html b/docs/22.2.1/categories/index.html
new file mode 100644
index 000000000..4acb82efc
--- /dev/null
+++ b/docs/22.2.1/categories/index.html
@@ -0,0 +1,3615 @@
[3615 added lines: generated Hugo taxonomy page titled "Categories :: Oracle Fusion Middleware on Kubernetes"; site navigation plus an empty "Categories" listing]
diff --git a/docs/22.2.1/categories/index.xml b/docs/22.2.1/categories/index.xml
new file mode 100644
index 000000000..4ac05663c
--- /dev/null
+++ b/docs/22.2.1/categories/index.xml
@@ -0,0 +1,14 @@
[14 added lines: generated RSS feed "Categories on Oracle Fusion Middleware on Kubernetes", link /fmw-kubernetes/22.2.1/categories/, description "Recent content in Categories on Oracle Fusion Middleware on Kubernetes", generator Hugo -- gohugo.io, language en-us; no newline at end of file]
diff --git a/docs/22.2.1/css/atom-one-dark-reasonable.css b/docs/22.2.1/css/atom-one-dark-reasonable.css
new file mode 100644
index 000000000..fd41c996a
--- /dev/null
+++ b/docs/22.2.1/css/atom-one-dark-reasonable.css
@@ -0,0 +1,77 @@
[77 added lines: Atom One Dark highlight.js theme with ReasonML support, by Gidi Morris, based off work by Daniel Gamage; original theme from https://github.com/atom/one-dark-syntax]
diff --git a/docs/22.2.1/css/auto-complete.css b/docs/22.2.1/css/auto-complete.css
new file mode 100644
index 000000000..ac6979ad3
--- /dev/null
+++ b/docs/22.2.1/css/auto-complete.css
@@ -0,0 +1,47 @@
[47 added lines: styles for the .autocomplete-suggestions search dropdown used by the docs theme]
diff --git a/docs/22.2.1/css/featherlight.min.css b/docs/22.2.1/css/featherlight.min.css
new file mode 100644
index 000000000..1b00c7861
--- /dev/null
+++ b/docs/22.2.1/css/featherlight.min.css
@@ -0,0 +1,8 @@
[8 added lines: Featherlight ultra slim jQuery lightbox, version 1.7.13, http://noelboss.github.io/featherlight/, Copyright (c) 2015 Noël Raoul Bossart, MIT licensed; minified stylesheet, no newline at end of file]
diff --git a/docs/22.2.1/css/fontawesome-all.min.css b/docs/22.2.1/css/fontawesome-all.min.css
new file mode 100644
index 000000000..de5647372
--- /dev/null
+++ b/docs/22.2.1/css/fontawesome-all.min.css
@@ -0,0 +1 @@
[1 added line: minified Font Awesome icon stylesheet (.fa, .fab, .fal, .far, .fas classes and per-icon content codes)]
.fa-magento:before{content:"\f3c4"}.fa-magic:before{content:"\f0d0"}.fa-magnet:before{content:"\f076"}.fa-mail-bulk:before{content:"\f674"}.fa-mailchimp:before{content:"\f59e"}.fa-male:before{content:"\f183"}.fa-mandalorian:before{content:"\f50f"}.fa-map:before{content:"\f279"}.fa-map-marked:before{content:"\f59f"}.fa-map-marked-alt:before{content:"\f5a0"}.fa-map-marker:before{content:"\f041"}.fa-map-marker-alt:before{content:"\f3c5"}.fa-map-pin:before{content:"\f276"}.fa-map-signs:before{content:"\f277"}.fa-markdown:before{content:"\f60f"}.fa-marker:before{content:"\f5a1"}.fa-mars:before{content:"\f222"}.fa-mars-double:before{content:"\f227"}.fa-mars-stroke:before{content:"\f229"}.fa-mars-stroke-h:before{content:"\f22b"}.fa-mars-stroke-v:before{content:"\f22a"}.fa-mask:before{content:"\f6fa"}.fa-mastodon:before{content:"\f4f6"}.fa-maxcdn:before{content:"\f136"}.fa-medal:before{content:"\f5a2"}.fa-medapps:before{content:"\f3c6"}.fa-medium:before{content:"\f23a"}.fa-medium-m:before{content:"\f3c7"}.fa-medkit:before{content:"\f0fa"}.fa-medrt:before{content:"\f3c8"}.fa-meetup:before{content:"\f2e0"}.fa-megaport:before{content:"\f5a3"}.fa-meh:before{content:"\f11a"}.fa-meh-blank:before{content:"\f5a4"}.fa-meh-rolling-eyes:before{content:"\f5a5"}.fa-memory:before{content:"\f538"}.fa-mendeley:before{content:"\f7b3"}.fa-menorah:before{content:"\f676"}.fa-mercury:before{content:"\f223"}.fa-meteor:before{content:"\f753"}.fa-microchip:before{content:"\f2db"}.fa-microphone:before{content:"\f130"}.fa-microphone-alt:before{content:"\f3c9"}.fa-microphone-alt-slash:before{content:"\f539"}.fa-microphone-slash:before{content:"\f131"}.fa-microscope:before{content:"\f610"}.fa-microsoft:before{content:"\f3ca"}.fa-minus:before{content:"\f068"}.fa-minus-circle:before{content:"\f056"}.fa-minus-square:before{content:"\f146"}.fa-mitten:before{content:"\f7b5"}.fa-mix:before{content:"\f3cb"}.fa-mixcloud:before{content:"\f289"}.fa-mizuni:before{content:"\f3cc"}.fa-mobile:before{content:"\f10b"}.fa-mobile-alt:before{content:"\f3cd"}.fa-modx:before{content:"\f285"}.fa-monero:before{content:"\f3d0"}.fa-money-bill:before{content:"\f0d6"}.fa-money-bill-alt:before{content:"\f3d1"}.fa-money-bill-wave:before{content:"\f53a"}.fa-money-bill-wave-alt:before{content:"\f53b"}.fa-money-check:before{content:"\f53c"}.fa-money-check-alt:before{content:"\f53d"}.fa-monument:before{content:"\f5a6"}.fa-moon:before{content:"\f186"}.fa-mortar-pestle:before{content:"\f5a7"}.fa-mosque:before{content:"\f678"}.fa-motorcycle:before{content:"\f21c"}.fa-mountain:before{content:"\f6fc"}.fa-mouse-pointer:before{content:"\f245"}.fa-mug-hot:before{content:"\f7b6"}.fa-music:before{content:"\f001"}.fa-napster:before{content:"\f3d2"}.fa-neos:before{content:"\f612"}.fa-network-wired:before{content:"\f6ff"}.fa-neuter:before{content:"\f22c"}.fa-newspaper:before{content:"\f1ea"}.fa-nimblr:before{content:"\f5a8"}.fa-nintendo-switch:before{content:"\f418"}.fa-node:before{content:"\f419"}.fa-node-js:before{content:"\f3d3"}.fa-not-equal:before{content:"\f53e"}.fa-notes-medical:before{content:"\f481"}.fa-npm:before{content:"\f3d4"}.fa-ns8:before{content:"\f3d5"}.fa-nutritionix:before{content:"\f3d6"}.fa-object-group:before{content:"\f247"}.fa-object-ungroup:before{content:"\f248"}.fa-odnoklassniki:before{content:"\f263"}.fa-odnoklassniki-square:before{content:"\f264"}.fa-oil-can:before{content:"\f613"}.fa-old-republic:before{content:"\f510"}.fa-om:before{content:"\f679"}.fa-opencart:before{content:"\f23d"}.fa-openid:before{content:"\f19b"}.fa-opera:before{content
:"\f26a"}.fa-optin-monster:before{content:"\f23c"}.fa-osi:before{content:"\f41a"}.fa-otter:before{content:"\f700"}.fa-outdent:before{content:"\f03b"}.fa-page4:before{content:"\f3d7"}.fa-pagelines:before{content:"\f18c"}.fa-paint-brush:before{content:"\f1fc"}.fa-paint-roller:before{content:"\f5aa"}.fa-palette:before{content:"\f53f"}.fa-palfed:before{content:"\f3d8"}.fa-pallet:before{content:"\f482"}.fa-paper-plane:before{content:"\f1d8"}.fa-paperclip:before{content:"\f0c6"}.fa-parachute-box:before{content:"\f4cd"}.fa-paragraph:before{content:"\f1dd"}.fa-parking:before{content:"\f540"}.fa-passport:before{content:"\f5ab"}.fa-pastafarianism:before{content:"\f67b"}.fa-paste:before{content:"\f0ea"}.fa-patreon:before{content:"\f3d9"}.fa-pause:before{content:"\f04c"}.fa-pause-circle:before{content:"\f28b"}.fa-paw:before{content:"\f1b0"}.fa-paypal:before{content:"\f1ed"}.fa-peace:before{content:"\f67c"}.fa-pen:before{content:"\f304"}.fa-pen-alt:before{content:"\f305"}.fa-pen-fancy:before{content:"\f5ac"}.fa-pen-nib:before{content:"\f5ad"}.fa-pen-square:before{content:"\f14b"}.fa-pencil-alt:before{content:"\f303"}.fa-pencil-ruler:before{content:"\f5ae"}.fa-penny-arcade:before{content:"\f704"}.fa-people-carry:before{content:"\f4ce"}.fa-percent:before{content:"\f295"}.fa-percentage:before{content:"\f541"}.fa-periscope:before{content:"\f3da"}.fa-person-booth:before{content:"\f756"}.fa-phabricator:before{content:"\f3db"}.fa-phoenix-framework:before{content:"\f3dc"}.fa-phoenix-squadron:before{content:"\f511"}.fa-phone:before{content:"\f095"}.fa-phone-slash:before{content:"\f3dd"}.fa-phone-square:before{content:"\f098"}.fa-phone-volume:before{content:"\f2a0"}.fa-php:before{content:"\f457"}.fa-pied-piper:before{content:"\f2ae"}.fa-pied-piper-alt:before{content:"\f1a8"}.fa-pied-piper-hat:before{content:"\f4e5"}.fa-pied-piper-pp:before{content:"\f1a7"}.fa-piggy-bank:before{content:"\f4d3"}.fa-pills:before{content:"\f484"}.fa-pinterest:before{content:"\f0d2"}.fa-pinterest-p:before{content:"\f231"}.fa-pinterest-square:before{content:"\f0d3"}.fa-place-of-worship:before{content:"\f67f"}.fa-plane:before{content:"\f072"}.fa-plane-arrival:before{content:"\f5af"}.fa-plane-departure:before{content:"\f5b0"}.fa-play:before{content:"\f04b"}.fa-play-circle:before{content:"\f144"}.fa-playstation:before{content:"\f3df"}.fa-plug:before{content:"\f1e6"}.fa-plus:before{content:"\f067"}.fa-plus-circle:before{content:"\f055"}.fa-plus-square:before{content:"\f0fe"}.fa-podcast:before{content:"\f2ce"}.fa-poll:before{content:"\f681"}.fa-poll-h:before{content:"\f682"}.fa-poo:before{content:"\f2fe"}.fa-poo-storm:before{content:"\f75a"}.fa-poop:before{content:"\f619"}.fa-portrait:before{content:"\f3e0"}.fa-pound-sign:before{content:"\f154"}.fa-power-off:before{content:"\f011"}.fa-pray:before{content:"\f683"}.fa-praying-hands:before{content:"\f684"}.fa-prescription:before{content:"\f5b1"}.fa-prescription-bottle:before{content:"\f485"}.fa-prescription-bottle-alt:before{content:"\f486"}.fa-print:before{content:"\f02f"}.fa-procedures:before{content:"\f487"}.fa-product-hunt:before{content:"\f288"}.fa-project-diagram:before{content:"\f542"}.fa-pushed:before{content:"\f3e1"}.fa-puzzle-piece:before{content:"\f12e"}.fa-python:before{content:"\f3e2"}.fa-qq:before{content:"\f1d6"}.fa-qrcode:before{content:"\f029"}.fa-question:before{content:"\f128"}.fa-question-circle:before{content:"\f059"}.fa-quidditch:before{content:"\f458"}.fa-quinscape:before{content:"\f459"}.fa-quora:before{content:"\f2c4"}.fa-quote-left:before{content:"\f10d"}.fa-quote-rig
ht:before{content:"\f10e"}.fa-quran:before{content:"\f687"}.fa-r-project:before{content:"\f4f7"}.fa-radiation:before{content:"\f7b9"}.fa-radiation-alt:before{content:"\f7ba"}.fa-rainbow:before{content:"\f75b"}.fa-random:before{content:"\f074"}.fa-raspberry-pi:before{content:"\f7bb"}.fa-ravelry:before{content:"\f2d9"}.fa-react:before{content:"\f41b"}.fa-reacteurope:before{content:"\f75d"}.fa-readme:before{content:"\f4d5"}.fa-rebel:before{content:"\f1d0"}.fa-receipt:before{content:"\f543"}.fa-recycle:before{content:"\f1b8"}.fa-red-river:before{content:"\f3e3"}.fa-reddit:before{content:"\f1a1"}.fa-reddit-alien:before{content:"\f281"}.fa-reddit-square:before{content:"\f1a2"}.fa-redhat:before{content:"\f7bc"}.fa-redo:before{content:"\f01e"}.fa-redo-alt:before{content:"\f2f9"}.fa-registered:before{content:"\f25d"}.fa-renren:before{content:"\f18b"}.fa-reply:before{content:"\f3e5"}.fa-reply-all:before{content:"\f122"}.fa-replyd:before{content:"\f3e6"}.fa-republican:before{content:"\f75e"}.fa-researchgate:before{content:"\f4f8"}.fa-resolving:before{content:"\f3e7"}.fa-restroom:before{content:"\f7bd"}.fa-retweet:before{content:"\f079"}.fa-rev:before{content:"\f5b2"}.fa-ribbon:before{content:"\f4d6"}.fa-ring:before{content:"\f70b"}.fa-road:before{content:"\f018"}.fa-robot:before{content:"\f544"}.fa-rocket:before{content:"\f135"}.fa-rocketchat:before{content:"\f3e8"}.fa-rockrms:before{content:"\f3e9"}.fa-route:before{content:"\f4d7"}.fa-rss:before{content:"\f09e"}.fa-rss-square:before{content:"\f143"}.fa-ruble-sign:before{content:"\f158"}.fa-ruler:before{content:"\f545"}.fa-ruler-combined:before{content:"\f546"}.fa-ruler-horizontal:before{content:"\f547"}.fa-ruler-vertical:before{content:"\f548"}.fa-running:before{content:"\f70c"}.fa-rupee-sign:before{content:"\f156"}.fa-sad-cry:before{content:"\f5b3"}.fa-sad-tear:before{content:"\f5b4"}.fa-safari:before{content:"\f267"}.fa-sass:before{content:"\f41e"}.fa-satellite:before{content:"\f7bf"}.fa-satellite-dish:before{content:"\f7c0"}.fa-save:before{content:"\f0c7"}.fa-schlix:before{content:"\f3ea"}.fa-school:before{content:"\f549"}.fa-screwdriver:before{content:"\f54a"}.fa-scribd:before{content:"\f28a"}.fa-scroll:before{content:"\f70e"}.fa-sd-card:before{content:"\f7c2"}.fa-search:before{content:"\f002"}.fa-search-dollar:before{content:"\f688"}.fa-search-location:before{content:"\f689"}.fa-search-minus:before{content:"\f010"}.fa-search-plus:before{content:"\f00e"}.fa-searchengin:before{content:"\f3eb"}.fa-seedling:before{content:"\f4d8"}.fa-sellcast:before{content:"\f2da"}.fa-sellsy:before{content:"\f213"}.fa-server:before{content:"\f233"}.fa-servicestack:before{content:"\f3ec"}.fa-shapes:before{content:"\f61f"}.fa-share:before{content:"\f064"}.fa-share-alt:before{content:"\f1e0"}.fa-share-alt-square:before{content:"\f1e1"}.fa-share-square:before{content:"\f14d"}.fa-shekel-sign:before{content:"\f20b"}.fa-shield-alt:before{content:"\f3ed"}.fa-ship:before{content:"\f21a"}.fa-shipping-fast:before{content:"\f48b"}.fa-shirtsinbulk:before{content:"\f214"}.fa-shoe-prints:before{content:"\f54b"}.fa-shopping-bag:before{content:"\f290"}.fa-shopping-basket:before{content:"\f291"}.fa-shopping-cart:before{content:"\f07a"}.fa-shopware:before{content:"\f5b5"}.fa-shower:before{content:"\f2cc"}.fa-shuttle-van:before{content:"\f5b6"}.fa-sign:before{content:"\f4d9"}.fa-sign-in-alt:before{content:"\f2f6"}.fa-sign-language:before{content:"\f2a7"}.fa-sign-out-alt:before{content:"\f2f5"}.fa-signal:before{content:"\f012"}.fa-signature:before{content:"\f5b7"}.fa-sim-card:before{co
ntent:"\f7c4"}.fa-simplybuilt:before{content:"\f215"}.fa-sistrix:before{content:"\f3ee"}.fa-sitemap:before{content:"\f0e8"}.fa-sith:before{content:"\f512"}.fa-skating:before{content:"\f7c5"}.fa-sketch:before{content:"\f7c6"}.fa-skiing:before{content:"\f7c9"}.fa-skiing-nordic:before{content:"\f7ca"}.fa-skull:before{content:"\f54c"}.fa-skull-crossbones:before{content:"\f714"}.fa-skyatlas:before{content:"\f216"}.fa-skype:before{content:"\f17e"}.fa-slack:before{content:"\f198"}.fa-slack-hash:before{content:"\f3ef"}.fa-slash:before{content:"\f715"}.fa-sleigh:before{content:"\f7cc"}.fa-sliders-h:before{content:"\f1de"}.fa-slideshare:before{content:"\f1e7"}.fa-smile:before{content:"\f118"}.fa-smile-beam:before{content:"\f5b8"}.fa-smile-wink:before{content:"\f4da"}.fa-smog:before{content:"\f75f"}.fa-smoking:before{content:"\f48d"}.fa-smoking-ban:before{content:"\f54d"}.fa-sms:before{content:"\f7cd"}.fa-snapchat:before{content:"\f2ab"}.fa-snapchat-ghost:before{content:"\f2ac"}.fa-snapchat-square:before{content:"\f2ad"}.fa-snowboarding:before{content:"\f7ce"}.fa-snowflake:before{content:"\f2dc"}.fa-snowman:before{content:"\f7d0"}.fa-snowplow:before{content:"\f7d2"}.fa-socks:before{content:"\f696"}.fa-solar-panel:before{content:"\f5ba"}.fa-sort:before{content:"\f0dc"}.fa-sort-alpha-down:before{content:"\f15d"}.fa-sort-alpha-up:before{content:"\f15e"}.fa-sort-amount-down:before{content:"\f160"}.fa-sort-amount-up:before{content:"\f161"}.fa-sort-down:before{content:"\f0dd"}.fa-sort-numeric-down:before{content:"\f162"}.fa-sort-numeric-up:before{content:"\f163"}.fa-sort-up:before{content:"\f0de"}.fa-soundcloud:before{content:"\f1be"}.fa-sourcetree:before{content:"\f7d3"}.fa-spa:before{content:"\f5bb"}.fa-space-shuttle:before{content:"\f197"}.fa-speakap:before{content:"\f3f3"}.fa-spider:before{content:"\f717"}.fa-spinner:before{content:"\f110"}.fa-splotch:before{content:"\f5bc"}.fa-spotify:before{content:"\f1bc"}.fa-spray-can:before{content:"\f5bd"}.fa-square:before{content:"\f0c8"}.fa-square-full:before{content:"\f45c"}.fa-square-root-alt:before{content:"\f698"}.fa-squarespace:before{content:"\f5be"}.fa-stack-exchange:before{content:"\f18d"}.fa-stack-overflow:before{content:"\f16c"}.fa-stamp:before{content:"\f5bf"}.fa-star:before{content:"\f005"}.fa-star-and-crescent:before{content:"\f699"}.fa-star-half:before{content:"\f089"}.fa-star-half-alt:before{content:"\f5c0"}.fa-star-of-david:before{content:"\f69a"}.fa-star-of-life:before{content:"\f621"}.fa-staylinked:before{content:"\f3f5"}.fa-steam:before{content:"\f1b6"}.fa-steam-square:before{content:"\f1b7"}.fa-steam-symbol:before{content:"\f3f6"}.fa-step-backward:before{content:"\f048"}.fa-step-forward:before{content:"\f051"}.fa-stethoscope:before{content:"\f0f1"}.fa-sticker-mule:before{content:"\f3f7"}.fa-sticky-note:before{content:"\f249"}.fa-stop:before{content:"\f04d"}.fa-stop-circle:before{content:"\f28d"}.fa-stopwatch:before{content:"\f2f2"}.fa-store:before{content:"\f54e"}.fa-store-alt:before{content:"\f54f"}.fa-strava:before{content:"\f428"}.fa-stream:before{content:"\f550"}.fa-street-view:before{content:"\f21d"}.fa-strikethrough:before{content:"\f0cc"}.fa-stripe:before{content:"\f429"}.fa-stripe-s:before{content:"\f42a"}.fa-stroopwafel:before{content:"\f551"}.fa-studiovinari:before{content:"\f3f8"}.fa-stumbleupon:before{content:"\f1a4"}.fa-stumbleupon-circle:before{content:"\f1a3"}.fa-subscript:before{content:"\f12c"}.fa-subway:before{content:"\f239"}.fa-suitcase:before{content:"\f0f2"}.fa-suitcase-rolling:before{content:"\f5c1"}.fa-sun:before{conte
nt:"\f185"}.fa-superpowers:before{content:"\f2dd"}.fa-superscript:before{content:"\f12b"}.fa-supple:before{content:"\f3f9"}.fa-surprise:before{content:"\f5c2"}.fa-suse:before{content:"\f7d6"}.fa-swatchbook:before{content:"\f5c3"}.fa-swimmer:before{content:"\f5c4"}.fa-swimming-pool:before{content:"\f5c5"}.fa-synagogue:before{content:"\f69b"}.fa-sync:before{content:"\f021"}.fa-sync-alt:before{content:"\f2f1"}.fa-syringe:before{content:"\f48e"}.fa-table:before{content:"\f0ce"}.fa-table-tennis:before{content:"\f45d"}.fa-tablet:before{content:"\f10a"}.fa-tablet-alt:before{content:"\f3fa"}.fa-tablets:before{content:"\f490"}.fa-tachometer-alt:before{content:"\f3fd"}.fa-tag:before{content:"\f02b"}.fa-tags:before{content:"\f02c"}.fa-tape:before{content:"\f4db"}.fa-tasks:before{content:"\f0ae"}.fa-taxi:before{content:"\f1ba"}.fa-teamspeak:before{content:"\f4f9"}.fa-teeth:before{content:"\f62e"}.fa-teeth-open:before{content:"\f62f"}.fa-telegram:before{content:"\f2c6"}.fa-telegram-plane:before{content:"\f3fe"}.fa-temperature-high:before{content:"\f769"}.fa-temperature-low:before{content:"\f76b"}.fa-tencent-weibo:before{content:"\f1d5"}.fa-tenge:before{content:"\f7d7"}.fa-terminal:before{content:"\f120"}.fa-text-height:before{content:"\f034"}.fa-text-width:before{content:"\f035"}.fa-th:before{content:"\f00a"}.fa-th-large:before{content:"\f009"}.fa-th-list:before{content:"\f00b"}.fa-the-red-yeti:before{content:"\f69d"}.fa-theater-masks:before{content:"\f630"}.fa-themeco:before{content:"\f5c6"}.fa-themeisle:before{content:"\f2b2"}.fa-thermometer:before{content:"\f491"}.fa-thermometer-empty:before{content:"\f2cb"}.fa-thermometer-full:before{content:"\f2c7"}.fa-thermometer-half:before{content:"\f2c9"}.fa-thermometer-quarter:before{content:"\f2ca"}.fa-thermometer-three-quarters:before{content:"\f2c8"}.fa-think-peaks:before{content:"\f731"}.fa-thumbs-down:before{content:"\f165"}.fa-thumbs-up:before{content:"\f164"}.fa-thumbtack:before{content:"\f08d"}.fa-ticket-alt:before{content:"\f3ff"}.fa-times:before{content:"\f00d"}.fa-times-circle:before{content:"\f057"}.fa-tint:before{content:"\f043"}.fa-tint-slash:before{content:"\f5c7"}.fa-tired:before{content:"\f5c8"}.fa-toggle-off:before{content:"\f204"}.fa-toggle-on:before{content:"\f205"}.fa-toilet:before{content:"\f7d8"}.fa-toilet-paper:before{content:"\f71e"}.fa-toolbox:before{content:"\f552"}.fa-tools:before{content:"\f7d9"}.fa-tooth:before{content:"\f5c9"}.fa-torah:before{content:"\f6a0"}.fa-torii-gate:before{content:"\f6a1"}.fa-tractor:before{content:"\f722"}.fa-trade-federation:before{content:"\f513"}.fa-trademark:before{content:"\f25c"}.fa-traffic-light:before{content:"\f637"}.fa-train:before{content:"\f238"}.fa-tram:before{content:"\f7da"}.fa-transgender:before{content:"\f224"}.fa-transgender-alt:before{content:"\f225"}.fa-trash:before{content:"\f1f8"}.fa-trash-alt:before{content:"\f2ed"}.fa-tree:before{content:"\f1bb"}.fa-trello:before{content:"\f181"}.fa-tripadvisor:before{content:"\f262"}.fa-trophy:before{content:"\f091"}.fa-truck:before{content:"\f0d1"}.fa-truck-loading:before{content:"\f4de"}.fa-truck-monster:before{content:"\f63b"}.fa-truck-moving:before{content:"\f4df"}.fa-truck-pickup:before{content:"\f63c"}.fa-tshirt:before{content:"\f553"}.fa-tty:before{content:"\f1e4"}.fa-tumblr:before{content:"\f173"}.fa-tumblr-square:before{content:"\f174"}.fa-tv:before{content:"\f26c"}.fa-twitch:before{content:"\f1e8"}.fa-twitter:before{content:"\f099"}.fa-twitter-square:before{content:"\f081"}.fa-typo3:before{content:"\f42b"}.fa-uber:before{content:"\f402"}
.fa-ubuntu:before{content:"\f7df"}.fa-uikit:before{content:"\f403"}.fa-umbrella:before{content:"\f0e9"}.fa-umbrella-beach:before{content:"\f5ca"}.fa-underline:before{content:"\f0cd"}.fa-undo:before{content:"\f0e2"}.fa-undo-alt:before{content:"\f2ea"}.fa-uniregistry:before{content:"\f404"}.fa-universal-access:before{content:"\f29a"}.fa-university:before{content:"\f19c"}.fa-unlink:before{content:"\f127"}.fa-unlock:before{content:"\f09c"}.fa-unlock-alt:before{content:"\f13e"}.fa-untappd:before{content:"\f405"}.fa-upload:before{content:"\f093"}.fa-ups:before{content:"\f7e0"}.fa-usb:before{content:"\f287"}.fa-user:before{content:"\f007"}.fa-user-alt:before{content:"\f406"}.fa-user-alt-slash:before{content:"\f4fa"}.fa-user-astronaut:before{content:"\f4fb"}.fa-user-check:before{content:"\f4fc"}.fa-user-circle:before{content:"\f2bd"}.fa-user-clock:before{content:"\f4fd"}.fa-user-cog:before{content:"\f4fe"}.fa-user-edit:before{content:"\f4ff"}.fa-user-friends:before{content:"\f500"}.fa-user-graduate:before{content:"\f501"}.fa-user-injured:before{content:"\f728"}.fa-user-lock:before{content:"\f502"}.fa-user-md:before{content:"\f0f0"}.fa-user-minus:before{content:"\f503"}.fa-user-ninja:before{content:"\f504"}.fa-user-plus:before{content:"\f234"}.fa-user-secret:before{content:"\f21b"}.fa-user-shield:before{content:"\f505"}.fa-user-slash:before{content:"\f506"}.fa-user-tag:before{content:"\f507"}.fa-user-tie:before{content:"\f508"}.fa-user-times:before{content:"\f235"}.fa-users:before{content:"\f0c0"}.fa-users-cog:before{content:"\f509"}.fa-usps:before{content:"\f7e1"}.fa-ussunnah:before{content:"\f407"}.fa-utensil-spoon:before{content:"\f2e5"}.fa-utensils:before{content:"\f2e7"}.fa-vaadin:before{content:"\f408"}.fa-vector-square:before{content:"\f5cb"}.fa-venus:before{content:"\f221"}.fa-venus-double:before{content:"\f226"}.fa-venus-mars:before{content:"\f228"}.fa-viacoin:before{content:"\f237"}.fa-viadeo:before{content:"\f2a9"}.fa-viadeo-square:before{content:"\f2aa"}.fa-vial:before{content:"\f492"}.fa-vials:before{content:"\f493"}.fa-viber:before{content:"\f409"}.fa-video:before{content:"\f03d"}.fa-video-slash:before{content:"\f4e2"}.fa-vihara:before{content:"\f6a7"}.fa-vimeo:before{content:"\f40a"}.fa-vimeo-square:before{content:"\f194"}.fa-vimeo-v:before{content:"\f27d"}.fa-vine:before{content:"\f1ca"}.fa-vk:before{content:"\f189"}.fa-vnv:before{content:"\f40b"}.fa-volleyball-ball:before{content:"\f45f"}.fa-volume-down:before{content:"\f027"}.fa-volume-mute:before{content:"\f6a9"}.fa-volume-off:before{content:"\f026"}.fa-volume-up:before{content:"\f028"}.fa-vote-yea:before{content:"\f772"}.fa-vr-cardboard:before{content:"\f729"}.fa-vuejs:before{content:"\f41f"}.fa-walking:before{content:"\f554"}.fa-wallet:before{content:"\f555"}.fa-warehouse:before{content:"\f494"}.fa-water:before{content:"\f773"}.fa-weebly:before{content:"\f5cc"}.fa-weibo:before{content:"\f18a"}.fa-weight:before{content:"\f496"}.fa-weight-hanging:before{content:"\f5cd"}.fa-weixin:before{content:"\f1d7"}.fa-whatsapp:before{content:"\f232"}.fa-whatsapp-square:before{content:"\f40c"}.fa-wheelchair:before{content:"\f193"}.fa-whmcs:before{content:"\f40d"}.fa-wifi:before{content:"\f1eb"}.fa-wikipedia-w:before{content:"\f266"}.fa-wind:before{content:"\f72e"}.fa-window-close:before{content:"\f410"}.fa-window-maximize:before{content:"\f2d0"}.fa-window-minimize:before{content:"\f2d1"}.fa-window-restore:before{content:"\f2d2"}.fa-windows:before{content:"\f17a"}.fa-wine-bottle:before{content:"\f72f"}.fa-wine-glass:before{content:"\f4e3"}.fa-w
ine-glass-alt:before{content:"\f5ce"}.fa-wix:before{content:"\f5cf"}.fa-wizards-of-the-coast:before{content:"\f730"}.fa-wolf-pack-battalion:before{content:"\f514"}.fa-won-sign:before{content:"\f159"}.fa-wordpress:before{content:"\f19a"}.fa-wordpress-simple:before{content:"\f411"}.fa-wpbeginner:before{content:"\f297"}.fa-wpexplorer:before{content:"\f2de"}.fa-wpforms:before{content:"\f298"}.fa-wpressr:before{content:"\f3e4"}.fa-wrench:before{content:"\f0ad"}.fa-x-ray:before{content:"\f497"}.fa-xbox:before{content:"\f412"}.fa-xing:before{content:"\f168"}.fa-xing-square:before{content:"\f169"}.fa-y-combinator:before{content:"\f23b"}.fa-yahoo:before{content:"\f19e"}.fa-yandex:before{content:"\f413"}.fa-yandex-international:before{content:"\f414"}.fa-yarn:before{content:"\f7e3"}.fa-yelp:before{content:"\f1e9"}.fa-yen-sign:before{content:"\f157"}.fa-yin-yang:before{content:"\f6ad"}.fa-yoast:before{content:"\f2b1"}.fa-youtube:before{content:"\f167"}.fa-youtube-square:before{content:"\f431"}.fa-zhihu:before{content:"\f63f"}.sr-only{border:0;clip:rect(0,0,0,0);height:1px;margin:-1px;overflow:hidden;padding:0;position:absolute;width:1px}.sr-only-focusable:active,.sr-only-focusable:focus{clip:auto;height:auto;margin:0;overflow:visible;position:static;width:auto}@font-face{font-family:"Font Awesome 5 Brands";font-style:normal;font-weight:normal;src:url(../webfonts/fa-brands-400.eot);src:url(../webfonts/fa-brands-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-brands-400.woff2) format("woff2"),url(../webfonts/fa-brands-400.woff) format("woff"),url(../webfonts/fa-brands-400.ttf) format("truetype"),url(../webfonts/fa-brands-400.svg#fontawesome) format("svg")}.fab{font-family:"Font Awesome 5 Brands"}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:400;src:url(../webfonts/fa-regular-400.eot);src:url(../webfonts/fa-regular-400.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-regular-400.woff2) format("woff2"),url(../webfonts/fa-regular-400.woff) format("woff"),url(../webfonts/fa-regular-400.ttf) format("truetype"),url(../webfonts/fa-regular-400.svg#fontawesome) format("svg")}.far{font-weight:400}@font-face{font-family:"Font Awesome 5 Free";font-style:normal;font-weight:900;src:url(../webfonts/fa-solid-900.eot);src:url(../webfonts/fa-solid-900.eot?#iefix) format("embedded-opentype"),url(../webfonts/fa-solid-900.woff2) format("woff2"),url(../webfonts/fa-solid-900.woff) format("woff"),url(../webfonts/fa-solid-900.ttf) format("truetype"),url(../webfonts/fa-solid-900.svg#fontawesome) format("svg")}.fa,.far,.fas{font-family:"Font Awesome 5 Free"}.fa,.fas{font-weight:900} \ No newline at end of file diff --git a/docs/22.2.1/css/hugo-theme.css b/docs/22.2.1/css/hugo-theme.css new file mode 100644 index 000000000..741cab196 --- /dev/null +++ b/docs/22.2.1/css/hugo-theme.css @@ -0,0 +1,254 @@ +/* Insert here special css for hugo theme, on top of any other imported css */ + + +/* Table of contents */ + +.progress ul { + list-style: none; + margin: 0; + padding: 0 5px; +} + +#TableOfContents { + font-size: 13px !important; + max-height: 85vh; + overflow: auto; + padding: 15px !important; +} + + +#TableOfContents > ul > li > ul > li > ul li { + margin-right: 8px; +} + +#TableOfContents > ul > li > a { + font-weight: bold; padding: 0 18px; margin: 0 2px; +} + +#TableOfContents > ul > li > ul > li > a { + font-weight: bold; +} + +#TableOfContents > ul > li > ul > li > ul > li > ul > li > ul > li { + display: none; +} + +body { + font-size: 16px !important; + color: #323232 
!important; +} + +#body a.highlight, #body a.highlight:hover, #body a.highlight:focus { + text-decoration: none; + outline: none; + outline: 0; +} +#body a.highlight { + line-height: 1.1; + display: inline-block; +} +#body a.highlight:after { + display: block; + content: ""; + height: 1px; + width: 0%; + background-color: #0082a7; /*#CE3B2F*/ + -webkit-transition: width 0.5s ease; + -moz-transition: width 0.5s ease; + -ms-transition: width 0.5s ease; + transition: width 0.5s ease; +} +#body a.highlight:hover:after, #body a.highlight:focus:after { + width: 100%; +} +.progress { + position:absolute; + background-color: rgba(246, 246, 246, 0.97); + width: auto; + border: thin solid #ECECEC; + display:none; + z-index:200; +} + +#toc-menu { + border-right: thin solid #DAD8D8 !important; + padding-right: 1rem !important; + margin-right: 0.5rem !important; +} + +#sidebar-toggle-span { + border-right: thin solid #DAD8D8 !important; + padding-right: 0.5rem !important; + margin-right: 1rem !important; +} + +.btn { + display: inline-block !important; + padding: 6px 12px !important; + margin-bottom: 0 !important; + font-size: 14px !important; + font-weight: normal !important; + line-height: 1.42857143 !important; + text-align: center !important; + white-space: nowrap !important; + vertical-align: middle !important; + -ms-touch-action: manipulation !important; + touch-action: manipulation !important; + cursor: pointer !important; + -webkit-user-select: none !important; + -moz-user-select: none !important; + -ms-user-select: none !important; + user-select: none !important; + background-image: none !important; + border: 1px solid transparent !important; + border-radius: 4px !important; + -webkit-transition: all 0.15s !important; + -moz-transition: all 0.15s !important; + transition: all 0.15s !important; +} +.btn:focus { + /*outline: thin dotted; + outline: 5px auto -webkit-focus-ring-color; + outline-offset: -2px;*/ + outline: none !important; +} +.btn:hover, +.btn:focus { + color: #2b2b2b !important; + text-decoration: none !important; +} + +.btn-default { + color: #333 !important; + background-color: #fff !important; + border-color: #ccc !important; +} +.btn-default:hover, +.btn-default:focus, +.btn-default:active { + color: #fff !important; + background-color: #9e9e9e !important; + border-color: #9e9e9e !important; +} +.btn-default:active { + background-image: none !important; +} + +/* anchors */ +.anchor { + color: #00bdf3; + font-size: 0.5em; + cursor:pointer; + visibility:hidden; + margin-left: 0.5em; + position: absolute; + margin-top:0.1em; +} + +h2:hover .anchor, h3:hover .anchor, h4:hover .anchor, h5:hover .anchor, h6:hover .anchor { + visibility:visible; +} + +/* Redfines headers style */ + +h2, h3, h4, h5, h6 { + font-weight: 400; + line-height: 1.1; +} + +h1 a, h2 a, h3 a, h4 a, h5 a, h6 a { + font-weight: inherit; +} + +h2 { + font-size: 2.5rem; + line-height: 110% !important; + margin: 2.5rem 0 1.5rem 0; +} + +h3 { + font-size: 2rem; + line-height: 110% !important; + margin: 2rem 0 1rem 0; +} + +h4 { + font-size: 1.5rem; + line-height: 110% !important; + margin: 1.5rem 0 0.75rem 0; +} + +h5 { + font-size: 1rem; + line-height: 110% !important; + margin: 1rem 0 0.2rem 0; +} + +h6 { + font-size: 0.5rem; + line-height: 110% !important; + margin: 0.5rem 0 0.2rem 0; +} + +p { + margin: 1rem 0; +} + +figcaption h4 { + font-weight: 300 !important; + opacity: .85; + font-size: 1em; + text-align: center; + margin-top: -1.5em; +} + +.select-style { + border: 0; + width: 150px; + border-radius: 0px; 
+ overflow: hidden; + display: inline-flex; +} + +.select-style svg { + fill: #ccc; + width: 14px; + height: 14px; + pointer-events: none; + margin: auto; +} + +.select-style svg:hover { + fill: #e6e6e6; +} + +.select-style select { + padding: 0; + width: 130%; + border: none; + box-shadow: none; + background: transparent; + background-image: none; + -webkit-appearance: none; + margin: auto; + margin-left: 0px; + margin-right: -20px; +} + +.select-style select:focus { + outline: none; +} + +.select-style :hover { + cursor: pointer; +} + +@media only all and (max-width: 47.938em) { + #breadcrumbs .links, #top-github-link-text { + display: none; + } +} + +.is-sticky #top-bar { + box-shadow: -1px 2px 5px 1px rgba(0, 0, 0, 0.1); +} \ No newline at end of file diff --git a/docs/22.2.1/css/hybrid.css b/docs/22.2.1/css/hybrid.css new file mode 100644 index 000000000..29735a189 --- /dev/null +++ b/docs/22.2.1/css/hybrid.css @@ -0,0 +1,102 @@ +/* + +vim-hybrid theme by w0ng (https://github.com/w0ng/vim-hybrid) + +*/ + +/*background color*/ +.hljs { + display: block; + overflow-x: auto; + padding: 0.5em; + background: #1d1f21; +} + +/*selection color*/ +.hljs::selection, +.hljs span::selection { + background: #373b41; +} + +.hljs::-moz-selection, +.hljs span::-moz-selection { + background: #373b41; +} + +/*foreground color*/ +.hljs { + color: #c5c8c6; +} + +/*color: fg_yellow*/ +.hljs-title, +.hljs-name { + color: #f0c674; +} + +/*color: fg_comment*/ +.hljs-comment, +.hljs-meta, +.hljs-meta .hljs-keyword { + color: #707880; +} + +/*color: fg_red*/ +.hljs-number, +.hljs-symbol, +.hljs-literal, +.hljs-deletion, +.hljs-link { + color: #cc6666 +} + +/*color: fg_green*/ +.hljs-string, +.hljs-doctag, +.hljs-addition, +.hljs-regexp, +.hljs-selector-attr, +.hljs-selector-pseudo { + color: #b5bd68; +} + +/*color: fg_purple*/ +.hljs-attribute, +.hljs-code, +.hljs-selector-id { + color: #b294bb; +} + +/*color: fg_blue*/ +.hljs-keyword, +.hljs-selector-tag, +.hljs-bullet, +.hljs-tag { + color: #81a2be; +} + +/*color: fg_aqua*/ +.hljs-subst, +.hljs-variable, +.hljs-template-tag, +.hljs-template-variable { + color: #8abeb7; +} + +/*color: fg_orange*/ +.hljs-type, +.hljs-built_in, +.hljs-builtin-name, +.hljs-quote, +.hljs-section, +.hljs-selector-class { + color: #de935f; +} + +.hljs-emphasis { + font-style: italic; +} + +.hljs-strong { + font-weight: bold; +} diff --git a/docs/22.2.1/css/nucleus.css b/docs/22.2.1/css/nucleus.css new file mode 100644 index 000000000..1897fc5d6 --- /dev/null +++ b/docs/22.2.1/css/nucleus.css @@ -0,0 +1,615 @@ +*, *::before, *::after { + -webkit-box-sizing: border-box; + -moz-box-sizing: border-box; + box-sizing: border-box; } + +@-webkit-viewport { + width: device-width; } +@-moz-viewport { + width: device-width; } +@-ms-viewport { + width: device-width; } +@-o-viewport { + width: device-width; } +@viewport { + width: device-width; } +html { + font-size: 100%; + -ms-text-size-adjust: 100%; + -webkit-text-size-adjust: 100%; } + +body { + margin: 0; } + +article, +aside, +details, +figcaption, +figure, +footer, +header, +hgroup, +main, +nav, +section, +summary { + display: block; } + +audio, +canvas, +progress, +video { + display: inline-block; + vertical-align: baseline; } + +audio:not([controls]) { + display: none; + height: 0; } + +[hidden], +template { + display: none; } + +a { + background: transparent; + text-decoration: none; } + +a:active, +a:hover { + outline: 0; } + +abbr[title] { + border-bottom: 1px dotted; } + +b, +strong { + font-weight: bold; } + +dfn { + font-style: 
italic; } + +mark { + background: #FFFF27; + color: #333; } + +sub, +sup { + font-size: 0.8rem; + line-height: 0; + position: relative; + vertical-align: baseline; } + +sup { + top: -0.5em; } + +sub { + bottom: -0.25em; } + +img { + border: 0; + max-width: 100%; } + +svg:not(:root) { + overflow: hidden; } + +figure { + margin: 1em 40px; } + +hr { + height: 0; } + +pre { + overflow: auto; } + +button, +input, +optgroup, +select, +textarea { + color: inherit; + font: inherit; + margin: 0; } + +button { + overflow: visible; } + +button, +select { + text-transform: none; } + +button, +html input[type="button"], +input[type="reset"], +input[type="submit"] { + -webkit-appearance: button; + cursor: pointer; } + +button[disabled], +html input[disabled] { + cursor: default; } + +button::-moz-focus-inner, +input::-moz-focus-inner { + border: 0; + padding: 0; } + +input { + line-height: normal; } + +input[type="checkbox"], +input[type="radio"] { + padding: 0; } + +input[type="number"]::-webkit-inner-spin-button, +input[type="number"]::-webkit-outer-spin-button { + height: auto; } + +input[type="search"] { + -webkit-appearance: textfield; } + +input[type="search"]::-webkit-search-cancel-button, +input[type="search"]::-webkit-search-decoration { + -webkit-appearance: none; } + +legend { + border: 0; + padding: 0; } + +textarea { + overflow: auto; } + +optgroup { + font-weight: bold; } + +table { + border-collapse: collapse; + border-spacing: 0; + table-layout: fixed; + width: 100%; } + +tr, td, th { + vertical-align: middle; } + +th, td { + padding: 0.425rem 0; } + +th { + text-align: left; } + +.container { + width: 75em; + margin: 0 auto; + padding: 0; } + @media only all and (min-width: 60em) and (max-width: 74.938em) { + .container { + width: 60em; } } + @media only all and (min-width: 48em) and (max-width: 59.938em) { + .container { + width: 48em; } } + @media only all and (min-width: 30.063em) and (max-width: 47.938em) { + .container { + width: 30em; } } + @media only all and (max-width: 30em) { + .container { + width: 100%; } } + +.grid { + display: -webkit-box; + display: -moz-box; + display: box; + display: -webkit-flex; + display: -moz-flex; + display: -ms-flexbox; + display: flex; + -webkit-flex-flow: row; + -moz-flex-flow: row; + flex-flow: row; + list-style: none; + margin: 0; + padding: 0; } + @media only all and (max-width: 47.938em) { + .grid { + -webkit-flex-flow: row wrap; + -moz-flex-flow: row wrap; + flex-flow: row wrap; } } + +.block { + -webkit-box-flex: 1; + -moz-box-flex: 1; + box-flex: 1; + -webkit-flex: 1; + -moz-flex: 1; + -ms-flex: 1; + flex: 1; + min-width: 0; + min-height: 0; } + @media only all and (max-width: 47.938em) { + .block { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 100%; + -moz-flex: 0 100%; + -ms-flex: 0 100%; + flex: 0 100%; } } + +.content { + margin: 0.625rem; + padding: 0.938rem; } + +@media only all and (max-width: 47.938em) { + body [class*="size-"] { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 100%; + -moz-flex: 0 100%; + -ms-flex: 0 100%; + flex: 0 100%; } } + +.size-1-2 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 50%; + -moz-flex: 0 50%; + -ms-flex: 0 50%; + flex: 0 50%; } + +.size-1-3 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 33.33333%; + -moz-flex: 0 33.33333%; + -ms-flex: 0 33.33333%; + flex: 0 33.33333%; } + +.size-1-4 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 25%; + -moz-flex: 0 
25%; + -ms-flex: 0 25%; + flex: 0 25%; } + +.size-1-5 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 20%; + -moz-flex: 0 20%; + -ms-flex: 0 20%; + flex: 0 20%; } + +.size-1-6 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 16.66667%; + -moz-flex: 0 16.66667%; + -ms-flex: 0 16.66667%; + flex: 0 16.66667%; } + +.size-1-7 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 14.28571%; + -moz-flex: 0 14.28571%; + -ms-flex: 0 14.28571%; + flex: 0 14.28571%; } + +.size-1-8 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 12.5%; + -moz-flex: 0 12.5%; + -ms-flex: 0 12.5%; + flex: 0 12.5%; } + +.size-1-9 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 11.11111%; + -moz-flex: 0 11.11111%; + -ms-flex: 0 11.11111%; + flex: 0 11.11111%; } + +.size-1-10 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 10%; + -moz-flex: 0 10%; + -ms-flex: 0 10%; + flex: 0 10%; } + +.size-1-11 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 9.09091%; + -moz-flex: 0 9.09091%; + -ms-flex: 0 9.09091%; + flex: 0 9.09091%; } + +.size-1-12 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 8.33333%; + -moz-flex: 0 8.33333%; + -ms-flex: 0 8.33333%; + flex: 0 8.33333%; } + +@media only all and (min-width: 48em) and (max-width: 59.938em) { + .size-tablet-1-2 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 50%; + -moz-flex: 0 50%; + -ms-flex: 0 50%; + flex: 0 50%; } + + .size-tablet-1-3 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 33.33333%; + -moz-flex: 0 33.33333%; + -ms-flex: 0 33.33333%; + flex: 0 33.33333%; } + + .size-tablet-1-4 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 25%; + -moz-flex: 0 25%; + -ms-flex: 0 25%; + flex: 0 25%; } + + .size-tablet-1-5 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 20%; + -moz-flex: 0 20%; + -ms-flex: 0 20%; + flex: 0 20%; } + + .size-tablet-1-6 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 16.66667%; + -moz-flex: 0 16.66667%; + -ms-flex: 0 16.66667%; + flex: 0 16.66667%; } + + .size-tablet-1-7 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 14.28571%; + -moz-flex: 0 14.28571%; + -ms-flex: 0 14.28571%; + flex: 0 14.28571%; } + + .size-tablet-1-8 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 12.5%; + -moz-flex: 0 12.5%; + -ms-flex: 0 12.5%; + flex: 0 12.5%; } + + .size-tablet-1-9 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 11.11111%; + -moz-flex: 0 11.11111%; + -ms-flex: 0 11.11111%; + flex: 0 11.11111%; } + + .size-tablet-1-10 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 10%; + -moz-flex: 0 10%; + -ms-flex: 0 10%; + flex: 0 10%; } + + .size-tablet-1-11 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 9.09091%; + -moz-flex: 0 9.09091%; + -ms-flex: 0 9.09091%; + flex: 0 9.09091%; } + + .size-tablet-1-12 { + -webkit-box-flex: 0; + -moz-box-flex: 0; + box-flex: 0; + -webkit-flex: 0 8.33333%; + -moz-flex: 0 8.33333%; + -ms-flex: 0 8.33333%; + flex: 0 8.33333%; } } +@media only all and (max-width: 47.938em) { + @supports not (flex-wrap: wrap) { + .grid { + display: block; + -webkit-box-lines: inherit; + -moz-box-lines: inherit; + box-lines: inherit; + 
-webkit-flex-wrap: inherit; + -moz-flex-wrap: inherit; + -ms-flex-wrap: inherit; + flex-wrap: inherit; } + + .block { + display: block; + -webkit-box-flex: inherit; + -moz-box-flex: inherit; + box-flex: inherit; + -webkit-flex: inherit; + -moz-flex: inherit; + -ms-flex: inherit; + flex: inherit; } } } +.first-block { + -webkit-box-ordinal-group: 0; + -webkit-order: -1; + -ms-flex-order: -1; + order: -1; } + +.last-block { + -webkit-box-ordinal-group: 2; + -webkit-order: 1; + -ms-flex-order: 1; + order: 1; } + +.fixed-blocks { + -webkit-flex-flow: row wrap; + -moz-flex-flow: row wrap; + flex-flow: row wrap; } + .fixed-blocks .block { + -webkit-box-flex: inherit; + -moz-box-flex: inherit; + box-flex: inherit; + -webkit-flex: inherit; + -moz-flex: inherit; + -ms-flex: inherit; + flex: inherit; + width: 25%; } + @media only all and (min-width: 60em) and (max-width: 74.938em) { + .fixed-blocks .block { + width: 33.33333%; } } + @media only all and (min-width: 48em) and (max-width: 59.938em) { + .fixed-blocks .block { + width: 50%; } } + @media only all and (max-width: 47.938em) { + .fixed-blocks .block { + width: 100%; } } + +body { + font-size: 1.05rem; + line-height: 1.7; } + +h1, h2, h3, h4, h5, h6 { + margin: 0.85rem 0 1.7rem 0; + text-rendering: optimizeLegibility; } + +h1 { + font-size: 3.25rem; } + +h2 { + font-size: 2.55rem; } + +h3 { + font-size: 2.15rem; } + +h4 { + font-size: 1.8rem; } + +h5 { + font-size: 1.4rem; } + +h6 { + font-size: 0.9rem; } + +p { + margin: 1.7rem 0; } + +ul, ol { + margin-top: 1.7rem; + margin-bottom: 1.7rem; } + ul ul, ul ol, ol ul, ol ol { + margin-top: 0; + margin-bottom: 0; } + +blockquote { + margin: 1.7rem 0; + padding-left: 0.85rem; } + +cite { + display: block; + font-size: 0.925rem; } + cite:before { + content: "\2014 \0020"; } + +pre { + margin: 1.7rem 0; + padding: 0.938rem; } + +code { + vertical-align: bottom; } + +small { + font-size: 0.925rem; } + +hr { + border-left: none; + border-right: none; + border-top: none; + margin: 1.7rem 0; } + +fieldset { + border: 0; + padding: 0.938rem; + margin: 0 0 1.7rem 0; } + +input, +label, +select { + display: block; } + +label { + margin-bottom: 0.425rem; } + label.required:after { + content: "*"; } + label abbr { + display: none; } + +textarea, input[type="email"], input[type="number"], input[type="password"], input[type="search"], input[type="tel"], input[type="text"], input[type="url"], input[type="color"], input[type="date"], input[type="datetime"], input[type="datetime-local"], input[type="month"], input[type="time"], input[type="week"], select[multiple=multiple] { + -webkit-transition: border-color; + -moz-transition: border-color; + transition: border-color; + border-radius: 0.1875rem; + margin-bottom: 0.85rem; + padding: 0.425rem 0.425rem; + width: 100%; } + textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + outline: none; } + +textarea { + resize: vertical; } + +input[type="checkbox"], input[type="radio"] { + display: inline; + margin-right: 0.425rem; } + +input[type="file"] { + width: 100%; } + +select { + width: auto; + max-width: 100%; + margin-bottom: 1.7rem; } + +button, +input[type="submit"] { + cursor: pointer; 
+ user-select: none; + vertical-align: middle; + white-space: nowrap; + border: inherit; } diff --git a/docs/22.2.1/css/perfect-scrollbar.min.css b/docs/22.2.1/css/perfect-scrollbar.min.css new file mode 100644 index 000000000..ebd2cb43b --- /dev/null +++ b/docs/22.2.1/css/perfect-scrollbar.min.css @@ -0,0 +1,2 @@ +/* perfect-scrollbar v0.6.13 */ +.ps-container{-ms-touch-action:auto;touch-action:auto;overflow:hidden !important;-ms-overflow-style:none}@supports (-ms-overflow-style: none){.ps-container{overflow:auto !important}}@media screen and (-ms-high-contrast: active), (-ms-high-contrast: none){.ps-container{overflow:auto !important}}.ps-container.ps-active-x>.ps-scrollbar-x-rail,.ps-container.ps-active-y>.ps-scrollbar-y-rail{display:block;background-color:transparent}.ps-container.ps-in-scrolling.ps-x>.ps-scrollbar-x-rail{background-color:#eee;opacity:.9}.ps-container.ps-in-scrolling.ps-x>.ps-scrollbar-x-rail>.ps-scrollbar-x{background-color:#999;height:11px}.ps-container.ps-in-scrolling.ps-y>.ps-scrollbar-y-rail{background-color:#eee;opacity:.9}.ps-container.ps-in-scrolling.ps-y>.ps-scrollbar-y-rail>.ps-scrollbar-y{background-color:#999;width:11px}.ps-container>.ps-scrollbar-x-rail{display:none;position:absolute;opacity:0;-webkit-transition:background-color .2s linear, opacity .2s linear;-o-transition:background-color .2s linear, opacity .2s linear;-moz-transition:background-color .2s linear, opacity .2s linear;transition:background-color .2s linear, opacity .2s linear;bottom:0px;height:15px}.ps-container>.ps-scrollbar-x-rail>.ps-scrollbar-x{position:absolute;background-color:#aaa;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;-o-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;-moz-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;bottom:2px;height:6px}.ps-container>.ps-scrollbar-x-rail:hover>.ps-scrollbar-x,.ps-container>.ps-scrollbar-x-rail:active>.ps-scrollbar-x{height:11px}.ps-container>.ps-scrollbar-y-rail{display:none;position:absolute;opacity:0;-webkit-transition:background-color .2s linear, opacity .2s linear;-o-transition:background-color .2s linear, opacity .2s linear;-moz-transition:background-color .2s linear, opacity .2s linear;transition:background-color .2s linear, opacity .2s linear;right:0;width:15px}.ps-container>.ps-scrollbar-y-rail>.ps-scrollbar-y{position:absolute;background-color:#aaa;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px;-webkit-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, -webkit-border-radius .2s ease-in-out;-o-transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;-moz-transition:background-color .2s 
linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out;transition:background-color .2s linear, height .2s linear, width .2s ease-in-out, border-radius .2s ease-in-out, -webkit-border-radius .2s ease-in-out, -moz-border-radius .2s ease-in-out;right:2px;width:6px}.ps-container>.ps-scrollbar-y-rail:hover>.ps-scrollbar-y,.ps-container>.ps-scrollbar-y-rail:active>.ps-scrollbar-y{width:11px}.ps-container:hover.ps-in-scrolling.ps-x>.ps-scrollbar-x-rail{background-color:#eee;opacity:.9}.ps-container:hover.ps-in-scrolling.ps-x>.ps-scrollbar-x-rail>.ps-scrollbar-x{background-color:#999;height:11px}.ps-container:hover.ps-in-scrolling.ps-y>.ps-scrollbar-y-rail{background-color:#eee;opacity:.9}.ps-container:hover.ps-in-scrolling.ps-y>.ps-scrollbar-y-rail>.ps-scrollbar-y{background-color:#999;width:11px}.ps-container:hover>.ps-scrollbar-x-rail,.ps-container:hover>.ps-scrollbar-y-rail{opacity:.6}.ps-container:hover>.ps-scrollbar-x-rail:hover{background-color:#eee;opacity:.9}.ps-container:hover>.ps-scrollbar-x-rail:hover>.ps-scrollbar-x{background-color:#999}.ps-container:hover>.ps-scrollbar-y-rail:hover{background-color:#eee;opacity:.9}.ps-container:hover>.ps-scrollbar-y-rail:hover>.ps-scrollbar-y{background-color:#999} diff --git a/docs/22.2.1/css/tags.css b/docs/22.2.1/css/tags.css new file mode 100644 index 000000000..495d2f9f7 --- /dev/null +++ b/docs/22.2.1/css/tags.css @@ -0,0 +1,49 @@ +/* Tags */ + +#head-tags{ + margin-left:1em; + margin-top:1em; +} + +#body .tags a.tag-link { + display: inline-block; + line-height: 2em; + font-size: 0.8em; + position: relative; + margin: 0 16px 8px 0; + padding: 0 10px 0 12px; + background: #8451a1; + + -webkit-border-bottom-right-radius: 3px; + border-bottom-right-radius: 3px; + -webkit-border-top-right-radius: 3px; + border-top-right-radius: 3px; + + -webkit-box-shadow: 0 1px 2px rgba(0,0,0,0.2); + box-shadow: 0 1px 2px rgba(0,0,0,0.2); + color: #fff; +} + +#body .tags a.tag-link:before { + content: ""; + position: absolute; + top:0; + left: -1em; + width: 0; + height: 0; + border-color: transparent #8451a1 transparent transparent; + border-style: solid; + border-width: 1em 1em 1em 0; +} + +#body .tags a.tag-link:after { + content: ""; + position: absolute; + top: 10px; + left: 1px; + width: 5px; + height: 5px; + -webkit-border-radius: 50%; + border-radius: 100%; + background: #fff; +} diff --git a/docs/22.2.1/css/theme-blue.css b/docs/22.2.1/css/theme-blue.css new file mode 100644 index 000000000..9771ae5e3 --- /dev/null +++ b/docs/22.2.1/css/theme-blue.css @@ -0,0 +1,111 @@ + +:root{ + + --MAIN-TEXT-color:#323232; /* Color of text by default */ + --MAIN-TITLES-TEXT-color: #5e5e5e; /* Color of titles h2-h3-h4-h5 */ + --MAIN-LINK-color:#1C90F3; /* Color of links */ + --MAIN-LINK-HOVER-color:#167ad0; /* Color of hovered links */ + --MAIN-ANCHOR-color: #1C90F3; /* color of anchors on titles */ + + --MENU-HEADER-BG-color:#1C90F3; /* Background color of menu header */ + --MENU-HEADER-BORDER-color:#33a1ff; /*Color of menu header border */ + + --MENU-SEARCH-BG-color:#167ad0; /* Search field background color (by default borders + icons) */ + --MENU-SEARCH-BOX-color: #33a1ff; /* Override search field border color */ + --MENU-SEARCH-BOX-ICONS-color: #a1d2fd; /* Override search field icons color */ + + --MENU-SECTIONS-ACTIVE-BG-color:#20272b; /* Background color of the active section and its childs 
*/ + --MENU-SECTIONS-BG-color:#252c31; /* Background color of other sections */ + --MENU-SECTIONS-LINK-color: #ccc; /* Color of links in menu */ + --MENU-SECTIONS-LINK-HOVER-color: #e6e6e6; /* Color of links in menu, when hovered */ + --MENU-SECTION-ACTIVE-CATEGORY-color: #777; /* Color of active category text */ + --MENU-SECTION-ACTIVE-CATEGORY-BG-color: #fff; /* Color of background for the active category (only) */ + + --MENU-VISITED-color: #33a1ff; /* Color of 'page visited' icons in menu */ + --MENU-SECTION-HR-color: #20272b; /* Color of
separator in menu */ + +} + +body { + color: var(--MAIN-TEXT-color) !important; +} + +textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + border-color: none; + box-shadow: none; +} + +h2, h3, h4, h5 { + color: var(--MAIN-TITLES-TEXT-color) !important; +} + +a { + color: var(--MAIN-LINK-color); +} + +.anchor { + color: var(--MAIN-ANCHOR-color); +} + +a:hover { + color: var(--MAIN-LINK-HOVER-color); +} + +#sidebar ul li.visited > a .read-icon { + color: var(--MENU-VISITED-color); +} + +#body a.highlight:after { + display: block; + content: ""; + height: 1px; + width: 0%; + -webkit-transition: width 0.5s ease; + -moz-transition: width 0.5s ease; + -ms-transition: width 0.5s ease; + transition: width 0.5s ease; + background-color: var(--MAIN-LINK-HOVER-color); +} +#sidebar { + background-color: var(--MENU-SECTIONS-BG-color); +} +#sidebar #header-wrapper { + background: var(--MENU-HEADER-BG-color); + color: var(--MENU-SEARCH-BOX-color); + border-color: var(--MENU-HEADER-BORDER-color); +} +#sidebar .searchbox { + border-color: var(--MENU-SEARCH-BOX-color); + background: var(--MENU-SEARCH-BG-color); +} +#sidebar ul.topics > li.parent, #sidebar ul.topics > li.active { + background: var(--MENU-SECTIONS-ACTIVE-BG-color); +} +#sidebar .searchbox * { + color: var(--MENU-SEARCH-BOX-ICONS-color); +} + +#sidebar a { + color: var(--MENU-SECTIONS-LINK-color); +} + +#sidebar a:hover { + color: var(--MENU-SECTIONS-LINK-HOVER-color); +} + +#sidebar ul li.active > a { + background: var(--MENU-SECTION-ACTIVE-CATEGORY-BG-color); + color: var(--MENU-SECTION-ACTIVE-CATEGORY-color) !important; +} + +#sidebar hr { + border-color: var(--MENU-SECTION-HR-color); +} + +#body .tags a.tag-link { + background-color: var(--MENU-HEADER-BG-color); +} + +#body .tags a.tag-link:before { + border-right-color: var(--MENU-HEADER-BG-color); +} \ No newline at end of file diff --git a/docs/22.2.1/css/theme-green.css b/docs/22.2.1/css/theme-green.css new file mode 100644 index 000000000..3b0b1f721 --- /dev/null +++ b/docs/22.2.1/css/theme-green.css @@ -0,0 +1,111 @@ + +:root{ + + --MAIN-TEXT-color:#323232; /* Color of text by default */ + --MAIN-TITLES-TEXT-color: #5e5e5e; /* Color of titles h2-h3-h4-h5 */ + --MAIN-LINK-color:#599a3e; /* Color of links */ + --MAIN-LINK-HOVER-color:#3f6d2c; /* Color of hovered links */ + --MAIN-ANCHOR-color: #599a3e; /* color of anchors on titles */ + + --MENU-HEADER-BG-color:#74b559; /* Background color of menu header */ + --MENU-HEADER-BORDER-color:#9cd484; /*Color of menu header border */ + + --MENU-SEARCH-BG-color:#599a3e; /* Search field background color (by default borders + icons) */ + --MENU-SEARCH-BOX-color: #84c767; /* Override search field border color */ + --MENU-SEARCH-BOX-ICONS-color: #c7f7c4; /* Override search field icons color */ + + --MENU-SECTIONS-ACTIVE-BG-color:#1b211c; /* Background color of the active section and its childs */ + --MENU-SECTIONS-BG-color:#222723; /* Background color of other sections */ + --MENU-SECTIONS-LINK-color: #ccc; /* Color of links in menu */ + --MENU-SECTIONS-LINK-HOVER-color: #e6e6e6; /* Color of links in menu, when hovered */ + --MENU-SECTION-ACTIVE-CATEGORY-color: #777; 
/* Color of active category text */ + --MENU-SECTION-ACTIVE-CATEGORY-BG-color: #fff; /* Color of background for the active category (only) */ + + --MENU-VISITED-color: #599a3e; /* Color of 'page visited' icons in menu */ + --MENU-SECTION-HR-color: #18211c; /* Color of
separator in menu */ + +} + +body { + color: var(--MAIN-TEXT-color) !important; +} + +textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + border-color: none; + box-shadow: none; +} + +h2, h3, h4, h5 { + color: var(--MAIN-TITLES-TEXT-color) !important; +} + +a { + color: var(--MAIN-LINK-color); +} + +.anchor { + color: var(--MAIN-ANCHOR-color); +} + +a:hover { + color: var(--MAIN-LINK-HOVER-color); +} + +#sidebar ul li.visited > a .read-icon { + color: var(--MENU-VISITED-color); +} + +#body a.highlight:after { + display: block; + content: ""; + height: 1px; + width: 0%; + -webkit-transition: width 0.5s ease; + -moz-transition: width 0.5s ease; + -ms-transition: width 0.5s ease; + transition: width 0.5s ease; + background-color: var(--MAIN-LINK-HOVER-color); +} +#sidebar { + background-color: var(--MENU-SECTIONS-BG-color); +} +#sidebar #header-wrapper { + background: var(--MENU-HEADER-BG-color); + color: var(--MENU-SEARCH-BOX-color); + border-color: var(--MENU-HEADER-BORDER-color); +} +#sidebar .searchbox { + border-color: var(--MENU-SEARCH-BOX-color); + background: var(--MENU-SEARCH-BG-color); +} +#sidebar ul.topics > li.parent, #sidebar ul.topics > li.active { + background: var(--MENU-SECTIONS-ACTIVE-BG-color); +} +#sidebar .searchbox * { + color: var(--MENU-SEARCH-BOX-ICONS-color); +} + +#sidebar a { + color: var(--MENU-SECTIONS-LINK-color); +} + +#sidebar a:hover { + color: var(--MENU-SECTIONS-LINK-HOVER-color); +} + +#sidebar ul li.active > a { + background: var(--MENU-SECTION-ACTIVE-CATEGORY-BG-color); + color: var(--MENU-SECTION-ACTIVE-CATEGORY-color) !important; +} + +#sidebar hr { + border-color: var(--MENU-SECTION-HR-color); +} + +#body .tags a.tag-link { + background-color: var(--MENU-HEADER-BG-color); +} + +#body .tags a.tag-link:before { + border-right-color: var(--MENU-HEADER-BG-color); +} \ No newline at end of file diff --git a/docs/22.2.1/css/theme-red.css b/docs/22.2.1/css/theme-red.css new file mode 100644 index 000000000..36c9278e5 --- /dev/null +++ b/docs/22.2.1/css/theme-red.css @@ -0,0 +1,111 @@ + +:root{ + + --MAIN-TEXT-color:#323232; /* Color of text by default */ + --MAIN-TITLES-TEXT-color: #5e5e5e; /* Color of titles h2-h3-h4-h5 */ + --MAIN-LINK-color:#f31c1c; /* Color of links */ + --MAIN-LINK-HOVER-color:#d01616; /* Color of hovered links */ + --MAIN-ANCHOR-color: #f31c1c; /* color of anchors on titles */ + + --MENU-HEADER-BG-color:#dc1010; /* Background color of menu header */ + --MENU-HEADER-BORDER-color:#e23131; /*Color of menu header border */ + + --MENU-SEARCH-BG-color:#b90000; /* Search field background color (by default borders + icons) */ + --MENU-SEARCH-BOX-color: #ef2020; /* Override search field border color */ + --MENU-SEARCH-BOX-ICONS-color: #fda1a1; /* Override search field icons color */ + + --MENU-SECTIONS-ACTIVE-BG-color:#2b2020; /* Background color of the active section and its childs */ + --MENU-SECTIONS-BG-color:#312525; /* Background color of other sections */ + --MENU-SECTIONS-LINK-color: #ccc; /* Color of links in menu */ + --MENU-SECTIONS-LINK-HOVER-color: #e6e6e6; /* Color of links in menu, when hovered */ + --MENU-SECTION-ACTIVE-CATEGORY-color: #777; /* 
Color of active category text */ + --MENU-SECTION-ACTIVE-CATEGORY-BG-color: #fff; /* Color of background for the active category (only) */ + + --MENU-VISITED-color: #ff3333; /* Color of 'page visited' icons in menu */ + --MENU-SECTION-HR-color: #2b2020; /* Color of
separator in menu */ + +} + +body { + color: var(--MAIN-TEXT-color) !important; +} + +textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + border-color: none; + box-shadow: none; +} + +h2, h3, h4, h5 { + color: var(--MAIN-TITLES-TEXT-color) !important; +} + +a { + color: var(--MAIN-LINK-color); +} + +.anchor { + color: var(--MAIN-ANCHOR-color); +} + +a:hover { + color: var(--MAIN-LINK-HOVER-color); +} + +#sidebar ul li.visited > a .read-icon { + color: var(--MENU-VISITED-color); +} + +#body a.highlight:after { + display: block; + content: ""; + height: 1px; + width: 0%; + -webkit-transition: width 0.5s ease; + -moz-transition: width 0.5s ease; + -ms-transition: width 0.5s ease; + transition: width 0.5s ease; + background-color: var(--MAIN-LINK-HOVER-color); +} +#sidebar { + background-color: var(--MENU-SECTIONS-BG-color); +} +#sidebar #header-wrapper { + background: var(--MENU-HEADER-BG-color); + color: var(--MENU-SEARCH-BOX-color); + border-color: var(--MENU-HEADER-BORDER-color); +} +#sidebar .searchbox { + border-color: var(--MENU-SEARCH-BOX-color); + background: var(--MENU-SEARCH-BG-color); +} +#sidebar ul.topics > li.parent, #sidebar ul.topics > li.active { + background: var(--MENU-SECTIONS-ACTIVE-BG-color); +} +#sidebar .searchbox * { + color: var(--MENU-SEARCH-BOX-ICONS-color); +} + +#sidebar a { + color: var(--MENU-SECTIONS-LINK-color); +} + +#sidebar a:hover { + color: var(--MENU-SECTIONS-LINK-HOVER-color); +} + +#sidebar ul li.active > a { + background: var(--MENU-SECTION-ACTIVE-CATEGORY-BG-color); + color: var(--MENU-SECTION-ACTIVE-CATEGORY-color) !important; +} + +#sidebar hr { + border-color: var(--MENU-SECTION-HR-color); +} + +#body .tags a.tag-link { + background-color: var(--MENU-HEADER-BG-color); +} + +#body .tags a.tag-link:before { + border-right-color: var(--MENU-HEADER-BG-color); +} \ No newline at end of file diff --git a/docs/22.2.1/css/theme.css b/docs/22.2.1/css/theme.css new file mode 100644 index 000000000..9b4550457 --- /dev/null +++ b/docs/22.2.1/css/theme.css @@ -0,0 +1,1141 @@ +@charset "UTF-8"; + +/* Tags */ +@import "tags.css"; + +#top-github-link, #body #breadcrumbs { + position: relative; + top: 50%; + -webkit-transform: translateY(-50%); + -moz-transform: translateY(-50%); + -o-transform: translateY(-50%); + -ms-transform: translateY(-50%); + transform: translateY(-50%); +} +.button, .button-secondary { + display: inline-block; + padding: 7px 12px; +} +.button:active, .button-secondary:active { + margin: 2px 0 -2px 0; +} +@font-face { + font-family: 'Novacento Sans Wide'; + src: url("../fonts/Novecentosanswide-UltraLight-webfont.eot"); + src: url("../fonts/Novecentosanswide-UltraLight-webfont.eot?#iefix") format("embedded-opentype"), url("../fonts/Novecentosanswide-UltraLight-webfont.woff2") format("woff2"), url("../fonts/Novecentosanswide-UltraLight-webfont.woff") format("woff"), url("../fonts/Novecentosanswide-UltraLight-webfont.ttf") format("truetype"), url("../fonts/Novecentosanswide-UltraLight-webfont.svg#novecento_sans_wideultralight") format("svg"); + font-style: normal; + font-weight: 200; +} +@font-face { + font-family: 'Work Sans'; + font-style: normal; + 
font-weight: 300; + src: url("../fonts/Work_Sans_300.eot?#iefix") format("embedded-opentype"), url("../fonts/Work_Sans_300.woff") format("woff"), url("../fonts/Work_Sans_300.woff2") format("woff2"), url("../fonts/Work_Sans_300.svg#WorkSans") format("svg"), url("../fonts/Work_Sans_300.ttf") format("truetype"); +} +@font-face { + font-family: 'Work Sans'; + font-style: normal; + font-weight: 500; + src: url("../fonts/Work_Sans_500.eot?#iefix") format("embedded-opentype"), url("../fonts/Work_Sans_500.woff") format("woff"), url("../fonts/Work_Sans_500.woff2") format("woff2"), url("../fonts/Work_Sans_500.svg#WorkSans") format("svg"), url("../fonts/Work_Sans_500.ttf") format("truetype"); +} +body { + background: #fff; + color: #777; +} +body #chapter h1 { + font-size: 3.5rem; +} +@media only all and (min-width: 48em) and (max-width: 59.938em) { + body #chapter h1 { + font-size: 3rem; + } +} +@media only all and (max-width: 47.938em) { + body #chapter h1 { + font-size: 2rem; + } +} +a { + color: #00bdf3; +} +a:hover { + color: #0082a7; +} +pre { + position: relative; + color: #ffffff; +} +.bg { + background: #fff; + border: 1px solid #eaeaea; +} +b, strong, label, th { + font-weight: 600; +} +.default-animation, #header #logo-svg, #header #logo-svg path, #sidebar, #sidebar ul, #body, #body .padding, #body .nav { + -webkit-transition: all 0.5s ease; + -moz-transition: all 0.5s ease; + transition: all 0.5s ease; +} +#grav-logo { + max-width: 60%; +} +#grav-logo path { + fill: #fff !important; +} +#sidebar { + font-weight: 300 !important; +} +fieldset { + border: 1px solid #ddd; +} +textarea, input[type="email"], input[type="number"], input[type="password"], input[type="search"], input[type="tel"], input[type="text"], input[type="url"], input[type="color"], input[type="date"], input[type="datetime"], input[type="datetime-local"], input[type="month"], input[type="time"], input[type="week"], select[multiple=multiple] { + background-color: white; + border: 1px solid #ddd; + box-shadow: inset 0 1px 3px rgba(0, 0, 0, 0.06); +} +textarea:hover, input[type="email"]:hover, input[type="number"]:hover, input[type="password"]:hover, input[type="search"]:hover, input[type="tel"]:hover, input[type="text"]:hover, input[type="url"]:hover, input[type="color"]:hover, input[type="date"]:hover, input[type="datetime"]:hover, input[type="datetime-local"]:hover, input[type="month"]:hover, input[type="time"]:hover, input[type="week"]:hover, select[multiple=multiple]:hover { + border-color: #c4c4c4; +} +textarea:focus, input[type="email"]:focus, input[type="number"]:focus, input[type="password"]:focus, input[type="search"]:focus, input[type="tel"]:focus, input[type="text"]:focus, input[type="url"]:focus, input[type="color"]:focus, input[type="date"]:focus, input[type="datetime"]:focus, input[type="datetime-local"]:focus, input[type="month"]:focus, input[type="time"]:focus, input[type="week"]:focus, select[multiple=multiple]:focus { + border-color: #00bdf3; + box-shadow: inset 0 1px 3px rgba(0,0,0,.06),0 0 5px rgba(0,169,218,.7) +} +#header-wrapper { + background: #8451a1; + color: #fff; + text-align: center; + border-bottom: 4px solid #9c6fb6; + padding: 1rem; +} +#header a { + display: inline-block; +} +#header #logo-svg { + width: 8rem; + height: 2rem; +} +#header #logo-svg path { + fill: #fff; +} +.searchbox { + margin-top: 1rem; + position: relative; + border: 1px solid #915eae; + background: #764890; + border-radius: 4px; +} +.searchbox label { + color: rgba(255, 255, 255, 0.8); + position: absolute; + left: 10px; + 
top: 3px; +} +.searchbox span { + color: rgba(255, 255, 255, 0.6); + position: absolute; + right: 10px; + top: 3px; + cursor: pointer; +} +.searchbox span:hover { + color: rgba(255, 255, 255, 0.9); +} +.searchbox input { + display: inline-block; + color: #fff; + width: 100%; + height: 30px; + background: transparent; + border: 0; + padding: 0 25px 0 30px; + margin: 0; + font-weight: 300; +} +.searchbox input::-webkit-input-placeholder { + color: rgba(255, 255, 255, 0.6); +} +.searchbox input::-moz-placeholder { + color: rgba(255, 255, 255, 0.6); +} +.searchbox input:-moz-placeholder { + color: rgba(255, 255, 255, 0.6); +} +.searchbox input:-ms-input-placeholder { + color: rgba(255, 255, 255, 0.6); +} +#sidebar-toggle-span { + display: none; +} +@media only all and (max-width: 47.938em) { + #sidebar-toggle-span { + display: inline; + } +} +#sidebar { + background-color: #322A38; + position: fixed; + top: 0; + width: 300px; + bottom: 0; + left: 0; + font-weight: 400; + font-size: 15px; +} +#sidebar a { + color: #ccc; +} +#sidebar a:hover { + color: #e6e6e6; +} +#sidebar a.subtitle { + color: rgba(204, 204, 204, 0.6); +} +#sidebar hr { + border-bottom: 1px solid #2a232f; +} +#sidebar a.padding { + padding: 0 1rem; +} +#sidebar h5 { + margin: 2rem 0 0; + position: relative; + line-height: 2; +} +#sidebar h5 a { + display: block; + margin-left: 0; + margin-right: 0; + padding-left: 1rem; + padding-right: 1rem; +} +#sidebar h5 i { + color: rgba(204, 204, 204, 0.6); + position: absolute; + right: 0.6rem; + top: 0.7rem; + font-size: 80%; +} +#sidebar h5.parent a { + background: #201b24; + color: #d9d9d9 !important; +} +#sidebar h5.active a { + background: #fff; + color: #777 !important; +} +#sidebar h5.active i { + color: #777 !important; +} +#sidebar h5 + ul.topics { + display: none; + margin-top: 0; +} +#sidebar h5.parent + ul.topics, #sidebar h5.active + ul.topics { + display: block; +} +#sidebar ul { + list-style: none; + padding: 0; + margin: 0; +} +#sidebar ul.searched a { + color: #999999; +} +#sidebar ul.searched .search-match a { + color: #e6e6e6; +} +#sidebar ul.searched .search-match a:hover { + color: white; +} +#sidebar ul.topics { + margin: 0 1rem; +} +#sidebar ul.topics.searched ul { + display: block; +} +#sidebar ul.topics ul { + display: none; + padding-bottom: 1rem; +} +#sidebar ul.topics ul ul { + padding-bottom: 0; +} +#sidebar ul.topics li.parent ul, #sidebar ul.topics > li.active ul { + display: block; +} +#sidebar ul.topics > li > a { + line-height: 2rem; + font-size: 1.1rem; +} +#sidebar ul.topics > li > a b { + opacity: 0.5; + font-weight: normal; +} +#sidebar ul.topics > li > a .fa { + margin-top: 9px; +} +#sidebar ul.topics > li.parent, #sidebar ul.topics > li.active { + background: #251f29; + margin-left: -1rem; + margin-right: -1rem; + padding-left: 1rem; + padding-right: 1rem; +} +#sidebar ul li.active > a { + background: #fff; + color: #777 !important; + margin-left: -1rem; + margin-right: -1rem; + padding-left: 1rem; + padding-right: 1rem; +} +#sidebar ul li { + padding: 0; +} +#sidebar ul li.visited + span { + margin-right: 16px; +} +#sidebar ul li a { + display: block; + padding: 2px 0; +} +#sidebar ul li a span { + text-overflow: ellipsis; + overflow: hidden; + white-space: nowrap; + display: block; +} +#sidebar ul li > a { + padding: 4px 0; +} +#sidebar ul li.visited > a .read-icon { + color: #9c6fb6; + display: inline; +} +#sidebar ul li li { + padding-left: 1rem; + text-indent: 0.2rem; +} +#main { + background: #f7f7f7; + margin: 0 0 1.563rem 0; +} +#body { + 
position: relative; + margin-left: 300px; + min-height: 100%; +} +#body img, #body .video-container { + margin: 3rem auto; + display: block; + text-align: center; +} +#body img.border, #body .video-container.border { + border: 2px solid #e6e6e6 !important; + padding: 2px; +} +#body img.shadow, #body .video-container.shadow { + box-shadow: 0 10px 30px rgba(0, 0, 0, 0.1); +} +#body img.inline { + display: inline !important; + margin: 0 !important; + vertical-align: bottom; +} +#body .bordered { + border: 1px solid #ccc; +} +#body .padding { + padding: 3rem 6rem; +} +@media only all and (max-width: 59.938em) { + #body .padding { + position: static; + padding: 15px 3rem; + } +} +@media only all and (max-width: 47.938em) { + #body .padding { + padding: 5px 1rem; + } +} +#body h1 + hr { + margin-top: -1.7rem; + margin-bottom: 3rem; +} +@media only all and (max-width: 59.938em) { + #body #navigation { + position: static; + margin-right: 0 !important; + width: 100%; + display: table; + } +} +#body .nav { + position: fixed; + top: 0; + bottom: 0; + width: 4rem; + font-size: 50px; + height: 100%; + cursor: pointer; + display: table; + text-align: center; +} +#body .nav > i { + display: table-cell; + vertical-align: middle; + text-align: center; +} +@media only all and (max-width: 59.938em) { + #body .nav { + display: table-cell; + position: static; + top: auto; + width: 50%; + text-align: center; + height: 100px; + line-height: 100px; + padding-top: 0; + } + #body .nav > i { + display: inline-block; + } +} +#body .nav:hover { + background: #F6F6F6; +} +#body .nav.nav-pref { + left: 0; +} +#body .nav.nav-next { + right: 0; +} +#body-inner { + margin-bottom: 5rem; +} +#chapter { + display: flex; + align-items: center; + justify-content: center; + height: 100%; + padding: 2rem 0; +} +#chapter #body-inner { + padding-bottom: 3rem; + max-width: 80%; +} +#chapter h3 { + font-family: "Work Sans", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + font-weight: 300; + text-align: center; +} +#chapter h1 { + font-size: 5rem; + border-bottom: 4px solid #F0F2F4; +} +#chapter p { + text-align: center; + font-size: 1.2rem; +} +#footer { + padding: 3rem 1rem; + color: #b3b3b3; + font-size: 13px; +} +#footer p { + margin: 0; +} +body { + font-family: "Work Sans", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + font-weight: 300; + line-height: 1.6; + font-size: 18px !important; +} +h2, h3, h4, h5, h6 { + font-family: "Work Sans", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + text-rendering: optimizeLegibility; + color: #5e5e5e; + font-weight: 400; + letter-spacing: -1px; +} +h1 { + font-family: "Novacento Sans Wide", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + text-align: center; + text-transform: uppercase; + color: #222; + font-weight: 200; +} +blockquote { + border-left: 10px solid #F0F2F4; +} +blockquote p { + font-size: 1.1rem; + color: #999; +} +blockquote cite { + display: block; + text-align: right; + color: #666; + font-size: 1.2rem; +} +div.notices { + margin: 2rem 0; + position: relative; +} +div.notices p { + padding: 15px; + display: block; + font-size: 1rem; + margin-top: 0rem; + margin-bottom: 0rem; + color: #666; +} +div.notices p:first-child:before { + position: absolute; + top: 2px; + color: #fff; + font-family: "Font Awesome 5 Free"; + font-weight: 900; + content: "\f06a"; + left: 10px; +} +div.notices p:first-child:after { + position: absolute; + top: 2px; + color: #fff; + left: 2rem; +} +div.notices.info p { + border-top: 30px solid #F0B37E; + background: 
#FFF2DB; +} +div.notices.info p:first-child:after { + content: 'Info'; +} +div.notices.warning p { + border-top: 30px solid rgba(217, 83, 79, 0.8); + background: #FAE2E2; +} +div.notices.warning p:first-child:after { + content: 'Warning'; +} +div.notices.note p { + border-top: 30px solid #6AB0DE; + background: #E7F2FA; +} +div.notices.note p:first-child:after { + content: 'Note'; +} +div.notices.tip p { + border-top: 30px solid rgba(92, 184, 92, 0.8); + background: #E6F9E6; +} +div.notices.tip p:first-child:after { + content: 'Tip'; +} + +/* attachments shortcode */ + +section.attachments { + margin: 2rem 0; + position: relative; +} + +section.attachments label { + font-weight: 400; + padding-left: 0.5em; + padding-top: 0.2em; + padding-bottom: 0.2em; + margin: 0; +} + +section.attachments .attachments-files { + padding: 15px; + display: block; + font-size: 1rem; + margin-top: 0rem; + margin-bottom: 0rem; + color: #666; +} + +section.attachments.orange label { + color: #fff; + background: #F0B37E; +} + +section.attachments.orange .attachments-files { + background: #FFF2DB; +} + +section.attachments.green label { + color: #fff; + background: rgba(92, 184, 92, 0.8); +} + +section.attachments.green .attachments-files { + background: #E6F9E6; +} + +section.attachments.blue label { + color: #fff; + background: #6AB0DE; +} + +section.attachments.blue .attachments-files { + background: #E7F2FA; +} + +section.attachments.grey label { + color: #fff; + background: #505d65; +} + +section.attachments.grey .attachments-files { + background: #f4f4f4; +} + +/* Children shortcode */ + +/* Children shortcode */ +.children p { + font-size: small; + margin-top: 0px; + padding-top: 0px; + margin-bottom: 0px; + padding-bottom: 0px; +} +.children-li p { + font-size: small; + font-style: italic; + +} +.children-h2 p, .children-h3 p { + font-size: small; + margin-top: 0px; + padding-top: 0px; + margin-bottom: 0px; + padding-bottom: 0px; +} +.children h3,.children h2 { + margin-bottom: 0px; + margin-top: 5px; +} + +code, kbd, pre, samp { + font-family: "Consolas", menlo, monospace; + font-size: 92%; +} +code { + border-radius: 2px; + white-space: nowrap; + color: #5e5e5e; + background: #FFF7DD; + border: 1px solid #fbf0cb; + padding: 0px 2px; +} +code + .copy-to-clipboard { + margin-left: -1px; + border-left: 0 !important; + font-size: inherit !important; + vertical-align: middle; + height: 21px; + top: 0; +} +pre { + padding: 1rem; + margin: 2rem 0; + background: #282c34; + border: 0; + border-radius: 2px; + line-height: 1.15; +} +pre code { + color: whitesmoke; + background: inherit; + white-space: inherit; + border: 0; + padding: 0; + margin: 0; + font-size: 15px; +} +hr { + border-bottom: 4px solid #F0F2F4; +} +.page-title { + margin-top: -25px; + padding: 25px; + float: left; + clear: both; + background: #9c6fb6; + color: #fff; +} +#body a.anchor-link { + color: #ccc; +} +#body a.anchor-link:hover { + color: #9c6fb6; +} +#body-inner .tabs-wrapper.ui-theme-badges { + background: #1d1f21; +} +#body-inner .tabs-wrapper.ui-theme-badges .tabs-nav li { + font-size: 0.9rem; + text-transform: uppercase; +} +#body-inner .tabs-wrapper.ui-theme-badges .tabs-nav li a { + background: #35393c; +} +#body-inner .tabs-wrapper.ui-theme-badges .tabs-nav li.current a { + background: #4d5257; +} +#body-inner pre { + white-space: pre-wrap; +} +.tabs-wrapper pre { + margin: 1rem 0; + border: 0; + padding: 0; + background: inherit; +} +table { + border: 1px solid #eaeaea; + table-layout: auto; +} +th { + background: #f7f7f7; + 
padding: 0.5rem; +} +td { + padding: 0.5rem; + border: 1px solid #eaeaea; +} +.button { + background: #9c6fb6; + color: #fff; + box-shadow: 0 3px 0 #00a5d4; +} +.button:hover { + background: #00a5d4; + box-shadow: 0 3px 0 #008db6; + color: #fff; +} +.button:active { + box-shadow: 0 1px 0 #008db6; +} +.button-secondary { + background: #F8B450; + color: #fff; + box-shadow: 0 3px 0 #f7a733; +} +.button-secondary:hover { + background: #f7a733; + box-shadow: 0 3px 0 #f69b15; + color: #fff; +} +.button-secondary:active { + box-shadow: 0 1px 0 #f69b15; +} +.bullets { + margin: 1.7rem 0; + margin-left: -0.85rem; + margin-right: -0.85rem; + overflow: auto; +} +.bullet { + float: left; + padding: 0 0.85rem; +} +.two-column-bullet { + width: 50%; +} +@media only all and (max-width: 47.938em) { + .two-column-bullet { + width: 100%; + } +} +.three-column-bullet { + width: 33.33333%; +} +@media only all and (max-width: 47.938em) { + .three-column-bullet { + width: 100%; + } +} +.four-column-bullet { + width: 25%; +} +@media only all and (max-width: 47.938em) { + .four-column-bullet { + width: 100%; + } +} +.bullet-icon { + float: left; + background: #9c6fb6; + padding: 0.875rem; + width: 3.5rem; + height: 3.5rem; + border-radius: 50%; + color: #fff; + font-size: 1.75rem; + text-align: center; +} +.bullet-icon-1 { + background: #9c6fb6; +} +.bullet-icon-2 { + background: #00f3d8; +} +.bullet-icon-3 { + background: #e6f300; +} +.bullet-content { + margin-left: 4.55rem; +} +.tooltipped { + position: relative; +} +.tooltipped:after { + position: absolute; + z-index: 1000000; + display: none; + padding: 5px 8px; + font: normal normal 11px/1.5 "Work Sans", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + color: #fff; + text-align: center; + text-decoration: none; + text-shadow: none; + text-transform: none; + letter-spacing: normal; + word-wrap: break-word; + white-space: pre; + pointer-events: none; + content: attr(aria-label); + background: rgba(0, 0, 0, 0.8); + border-radius: 3px; + -webkit-font-smoothing: subpixel-antialiased; +} +.tooltipped:before { + position: absolute; + z-index: 1000001; + display: none; + width: 0; + height: 0; + color: rgba(0, 0, 0, 0.8); + pointer-events: none; + content: ""; + border: 5px solid transparent; +} +.tooltipped:hover:before, .tooltipped:hover:after, .tooltipped:active:before, .tooltipped:active:after, .tooltipped:focus:before, .tooltipped:focus:after { + display: inline-block; + text-decoration: none; +} +.tooltipped-s:after, .tooltipped-se:after, .tooltipped-sw:after { + top: 100%; + right: 50%; + margin-top: 5px; +} +.tooltipped-s:before, .tooltipped-se:before, .tooltipped-sw:before { + top: auto; + right: 50%; + bottom: -5px; + margin-right: -5px; + border-bottom-color: rgba(0, 0, 0, 0.8); +} +.tooltipped-se:after { + right: auto; + left: 50%; + margin-left: -15px; +} +.tooltipped-sw:after { + margin-right: -15px; +} +.tooltipped-n:after, .tooltipped-ne:after, .tooltipped-nw:after { + right: 50%; + bottom: 100%; + margin-bottom: 5px; +} +.tooltipped-n:before, .tooltipped-ne:before, .tooltipped-nw:before { + top: -5px; + right: 50%; + bottom: auto; + margin-right: -5px; + border-top-color: rgba(0, 0, 0, 0.8); +} +.tooltipped-ne:after { + right: auto; + left: 50%; + margin-left: -15px; +} +.tooltipped-nw:after { + margin-right: -15px; +} +.tooltipped-s:after, .tooltipped-n:after { + transform: translateX(50%); +} +.tooltipped-w:after { + right: 100%; + bottom: 50%; + margin-right: 5px; + transform: translateY(50%); +} +.tooltipped-w:before { + top: 50%; + 
bottom: 50%; + left: -5px; + margin-top: -5px; + border-left-color: rgba(0, 0, 0, 0.8); +} +.tooltipped-e:after { + bottom: 50%; + left: 100%; + margin-left: 5px; + transform: translateY(50%); +} +.tooltipped-e:before { + top: 50%; + right: -5px; + bottom: 50%; + margin-top: -5px; + border-right-color: rgba(0, 0, 0, 0.8); +} +.highlightable { + padding: 1rem 0 1rem; + overflow: auto; + position: relative; +} +.hljs::selection, .hljs span::selection { + background: #b7b7b7; +} +.lightbox-active #body { + overflow: visible; +} +.lightbox-active #body .padding { + overflow: visible; +} +#github-contrib i { + vertical-align: middle; +} +.featherlight img { + margin: 0 !important; +} +.lifecycle #body-inner ul { + list-style: none; + margin: 0; + padding: 2rem 0 0; + position: relative; +} +.lifecycle #body-inner ol { + margin: 1rem 0 1rem 0; + padding: 2rem; + position: relative; +} +.lifecycle #body-inner ol li { + margin-left: 1rem; +} +.lifecycle #body-inner ol strong, .lifecycle #body-inner ol label, .lifecycle #body-inner ol th { + text-decoration: underline; +} +.lifecycle #body-inner ol ol { + margin-left: -1rem; +} +.lifecycle #body-inner h3[class*='level'] { + font-size: 20px; + position: absolute; + margin: 0; + padding: 4px 10px; + right: 0; + z-index: 1000; + color: #fff; + background: #1ABC9C; +} +.lifecycle #body-inner ol h3 { + margin-top: 1rem !important; + right: 2rem !important; +} +.lifecycle #body-inner .level-1 + ol { + background: #f6fefc; + border: 4px solid #1ABC9C; + color: #16A085; +} +.lifecycle #body-inner .level-1 + ol h3 { + background: #2ECC71; +} +.lifecycle #body-inner .level-2 + ol { + background: #f7fdf9; + border: 4px solid #2ECC71; + color: #27AE60; +} +.lifecycle #body-inner .level-2 + ol h3 { + background: #3498DB; +} +.lifecycle #body-inner .level-3 + ol { + background: #f3f9fd; + border: 4px solid #3498DB; + color: #2980B9; +} +.lifecycle #body-inner .level-3 + ol h3 { + background: #34495E; +} +.lifecycle #body-inner .level-4 + ol { + background: #e4eaf0; + border: 4px solid #34495E; + color: #2C3E50; +} +.lifecycle #body-inner .level-4 + ol h3 { + background: #34495E; +} +#top-bar { + background: #F6F6F6; + border-radius: 2px; + padding: 0 1rem; + height: 0; + min-height: 3rem; +} +#top-github-link { + position: relative; + z-index: 1; + float: right; + display: block; +} +#body #breadcrumbs { + height: auto; + margin-bottom: 0; + padding-left: 0; + line-height: 1.4; + overflow: hidden; + white-space: nowrap; + text-overflow: ellipsis; + width: 70%; + display: inline-block; + float: left; +} +#body #breadcrumbs span { + padding: 0 0.1rem; +} +@media only all and (max-width: 59.938em) { + #sidebar { + width: 230px; + } + #body { + margin-left: 230px; + } +} +@media only all and (max-width: 47.938em) { + #sidebar { + width: 230px; + left: -230px; + } + #body { + margin-left: 0; + width: 100%; + } + .sidebar-hidden { + overflow: hidden; + } + .sidebar-hidden #sidebar { + left: 0; + } + .sidebar-hidden #body { + margin-left: 230px; + overflow: hidden; + } + .sidebar-hidden #overlay { + position: absolute; + left: 0; + right: 0; + top: 0; + bottom: 0; + z-index: 10; + background: rgba(255, 255, 255, 0.5); + cursor: pointer; + } +} +.copy-to-clipboard { + background-image: url(../images/clippy.svg); + background-position: 50% 50%; + background-size: 16px 16px; + background-repeat: no-repeat; + width: 27px; + height: 1.45rem; + top: -1px; + display: inline-block; + vertical-align: middle; + position: relative; + color: #5e5e5e; + background-color: #FFF7DD; + 
margin-left: -.2rem; + cursor: pointer; + border-radius: 0 2px 2px 0; + margin-bottom: 1px; +} +.copy-to-clipboard:hover { + background-color: #E8E2CD; +} +pre .copy-to-clipboard { + position: absolute; + right: 4px; + top: 4px; + background-color: #949bab; + color: #ccc; + border-radius: 2px; +} +pre .copy-to-clipboard:hover { + background-color: #656c72; + color: #fff; +} +.parent-element { + -webkit-transform-style: preserve-3d; + -moz-transform-style: preserve-3d; + transform-style: preserve-3d; +} + +#sidebar ul.topics > li > a .read-icon { + margin-top: 9px; +} + +#sidebar ul { + list-style: none; + padding: 0; + margin: 0; +} + +#sidebar #shortcuts li { + padding: 2px 0; + list-style: none; +} + +#sidebar ul li .read-icon { + display: none; + float: right; + font-size: 13px; + min-width: 16px; + margin: 4px 0 0 0; + text-align: right; +} +#sidebar ul li.visited > a .read-icon { + color: #00bdf3; + display: inline; +} + +#sidebar #shortcuts h3 { + font-family: "Novacento Sans Wide", "Helvetica", "Tahoma", "Geneva", "Arial", sans-serif; + color: white ; + margin-top:1rem; + padding-left: 1rem; +} +#homelinks { + background-color: #9c6fb6; + color: #fff; + padding: 7px 0; + border-bottom: 4px solid #9c6fb6; +} +#searchResults { + text-align: left; +} + +option { + color: initial; +} diff --git a/docs/22.2.1/fonts/Inconsolata.eot b/docs/22.2.1/fonts/Inconsolata.eot new file mode 100644 index 000000000..0a705d653 Binary files /dev/null and b/docs/22.2.1/fonts/Inconsolata.eot differ diff --git a/docs/22.2.1/fonts/Inconsolata.svg b/docs/22.2.1/fonts/Inconsolata.svg new file mode 100644 index 000000000..b7f97c875 --- /dev/null +++ b/docs/22.2.1/fonts/Inconsolata.svg @@ -0,0 +1,359 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/docs/22.2.1/fonts/Inconsolata.ttf b/docs/22.2.1/fonts/Inconsolata.ttf new file mode 100644 index 000000000..4b8a36d24 Binary files /dev/null and b/docs/22.2.1/fonts/Inconsolata.ttf differ diff --git a/docs/22.2.1/fonts/Inconsolata.woff b/docs/22.2.1/fonts/Inconsolata.woff new file mode 100644 index 000000000..6f39625e5 Binary files /dev/null and b/docs/22.2.1/fonts/Inconsolata.woff differ diff --git a/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.eot b/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.eot new file mode 100644 index 000000000..9984682fc Binary files /dev/null and b/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.eot differ diff --git a/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.svg b/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.svg new file mode 100644 index 000000000..c412ea8c1 --- /dev/null +++ b/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.svg @@ -0,0 +1,1019 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
[SVG glyph data omitted] \ No newline at end of file
diff --git a/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.ttf b/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.ttf new file mode 100644 index 000000000..8cfb62dd5 Binary files /dev/null and b/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.ttf differ
diff --git a/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.woff b/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.woff new file mode 100644 index 000000000..d5c429079 Binary files /dev/null and b/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.woff differ
diff --git a/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.woff2 b/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.woff2 new file mode 100644 index 000000000..eefb4a318 Binary files /dev/null and b/docs/22.2.1/fonts/Novecentosanswide-Normal-webfont.woff2 differ
diff --git a/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.eot b/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.eot new file mode 100644 index 000000000..2a26561f9 Binary files /dev/null and b/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.eot differ
diff --git a/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.svg b/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.svg new file mode 100644 index 000000000..e642ab076 --- /dev/null +++ b/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.svg @@ -0,0 +1,918 @@ [SVG glyph data omitted] \ No newline at end of file
diff --git a/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.ttf b/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.ttf new file mode 100644 index 000000000..9ce9c7f99 Binary files /dev/null and b/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.ttf differ
diff --git a/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.woff b/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.woff new file mode 100644 index 000000000..381650c98 Binary files /dev/null and b/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.woff differ
diff --git a/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.woff2 b/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.woff2 new file mode 100644 index 000000000..7e659549b Binary files /dev/null and b/docs/22.2.1/fonts/Novecentosanswide-UltraLight-webfont.woff2 differ
diff --git a/docs/22.2.1/fonts/Work_Sans_200.eot b/docs/22.2.1/fonts/Work_Sans_200.eot new file mode 100644 index 000000000..4052e4f94 Binary files /dev/null and b/docs/22.2.1/fonts/Work_Sans_200.eot differ
diff --git a/docs/22.2.1/fonts/Work_Sans_200.svg b/docs/22.2.1/fonts/Work_Sans_200.svg new file mode 100644 index 000000000..58ab4ba22 --- /dev/null +++ b/docs/22.2.1/fonts/Work_Sans_200.svg @@ -0,0 +1,332 @@ [SVG glyph data omitted]
diff --git a/docs/22.2.1/fonts/Work_Sans_200.ttf b/docs/22.2.1/fonts/Work_Sans_200.ttf new file mode 100644 index 000000000..68019e1cc Binary files /dev/null and b/docs/22.2.1/fonts/Work_Sans_200.ttf differ
diff --git a/docs/22.2.1/fonts/Work_Sans_200.woff b/docs/22.2.1/fonts/Work_Sans_200.woff new file mode 100644 index 000000000..a1bd9e469 Binary files /dev/null and b/docs/22.2.1/fonts/Work_Sans_200.woff differ
diff --git a/docs/22.2.1/fonts/Work_Sans_200.woff2 b/docs/22.2.1/fonts/Work_Sans_200.woff2 new file mode 100644 index 000000000..20c68a75c Binary files /dev/null and b/docs/22.2.1/fonts/Work_Sans_200.woff2 differ
diff --git a/docs/22.2.1/fonts/Work_Sans_300.eot b/docs/22.2.1/fonts/Work_Sans_300.eot new file mode 100644 index 000000000..ace799382 Binary files /dev/null and b/docs/22.2.1/fonts/Work_Sans_300.eot differ
diff --git a/docs/22.2.1/fonts/Work_Sans_300.svg b/docs/22.2.1/fonts/Work_Sans_300.svg new file mode 100644 index 000000000..f29d0c8a1 --- /dev/null +++ b/docs/22.2.1/fonts/Work_Sans_300.svg @@ -0,0 +1,331 @@ [SVG glyph data omitted]
diff --git a/docs/22.2.1/fonts/Work_Sans_300.ttf b/docs/22.2.1/fonts/Work_Sans_300.ttf new file mode 100644 index 000000000..35387c235 Binary files /dev/null and b/docs/22.2.1/fonts/Work_Sans_300.ttf differ
diff --git a/docs/22.2.1/fonts/Work_Sans_300.woff b/docs/22.2.1/fonts/Work_Sans_300.woff new file mode 100644 index 000000000..8d789eae9 Binary files /dev/null and b/docs/22.2.1/fonts/Work_Sans_300.woff differ
diff --git a/docs/22.2.1/fonts/Work_Sans_300.woff2 b/docs/22.2.1/fonts/Work_Sans_300.woff2 new file mode 100644 index 000000000..f6e216d64 Binary files /dev/null and b/docs/22.2.1/fonts/Work_Sans_300.woff2 differ
diff --git a/docs/22.2.1/fonts/Work_Sans_500.eot b/docs/22.2.1/fonts/Work_Sans_500.eot new file mode 100644 index 000000000..9df692942 Binary files /dev/null and b/docs/22.2.1/fonts/Work_Sans_500.eot differ
diff --git a/docs/22.2.1/fonts/Work_Sans_500.svg b/docs/22.2.1/fonts/Work_Sans_500.svg new file mode 100644 index 000000000..4b030b790 --- /dev/null +++ b/docs/22.2.1/fonts/Work_Sans_500.svg @@ -0,0 +1,333 @@ [SVG glyph data omitted]
diff --git a/docs/22.2.1/fonts/Work_Sans_500.ttf b/docs/22.2.1/fonts/Work_Sans_500.ttf new file mode 100644 index 000000000..5b8cc5342 Binary files /dev/null and b/docs/22.2.1/fonts/Work_Sans_500.ttf differ
diff --git a/docs/22.2.1/fonts/Work_Sans_500.woff b/docs/22.2.1/fonts/Work_Sans_500.woff new file mode 100644 index 000000000..df058514f Binary files /dev/null and b/docs/22.2.1/fonts/Work_Sans_500.woff differ
diff --git a/docs/22.2.1/fonts/Work_Sans_500.woff2 b/docs/22.2.1/fonts/Work_Sans_500.woff2 new file mode 100644 index 000000000..b06c54df0 Binary files /dev/null and b/docs/22.2.1/fonts/Work_Sans_500.woff2 differ
diff --git a/docs/22.2.1/images/.gitkeep b/docs/22.2.1/images/.gitkeep new file mode 100644 index 000000000..e69de29bb
diff --git a/docs/22.2.1/images/clippy.svg b/docs/22.2.1/images/clippy.svg new file mode 100644 index 000000000..1c8abc2fd --- /dev/null +++ b/docs/22.2.1/images/clippy.svg @@ -0,0 +1 @@ [SVG icon markup omitted]
diff --git a/docs/22.2.1/images/favicon.png b/docs/22.2.1/images/favicon.png new file mode 100644 index 000000000..df06e35d6 Binary files /dev/null and b/docs/22.2.1/images/favicon.png differ
diff --git a/docs/22.2.1/images/fmw_12c_12_2_1_4_0-logo.png b/docs/22.2.1/images/fmw_12c_12_2_1_4_0-logo.png new file mode 100644 
index 000000000..6a2d34fff Binary files /dev/null and b/docs/22.2.1/images/fmw_12c_12_2_1_4_0-logo.png differ diff --git a/docs/22.2.1/images/gopher-404.jpg b/docs/22.2.1/images/gopher-404.jpg new file mode 100644 index 000000000..2a5054389 Binary files /dev/null and b/docs/22.2.1/images/gopher-404.jpg differ diff --git a/docs/22.2.1/images/logo.png b/docs/22.2.1/images/logo.png new file mode 100644 index 000000000..6bfe10627 Binary files /dev/null and b/docs/22.2.1/images/logo.png differ diff --git a/docs/22.2.1/images/soa-domains/CreateApplicationServerConnection.jpg b/docs/22.2.1/images/soa-domains/CreateApplicationServerConnection.jpg new file mode 100644 index 000000000..e69f13ac0 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/CreateApplicationServerConnection.jpg differ diff --git a/docs/22.2.1/images/soa-domains/CreateApplicationServerConnectionTestConnection.jpg b/docs/22.2.1/images/soa-domains/CreateApplicationServerConnectionTestConnection.jpg new file mode 100644 index 000000000..84796fec9 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/CreateApplicationServerConnectionTestConnection.jpg differ diff --git a/docs/22.2.1/images/soa-domains/ExposeSOAMST3.png b/docs/22.2.1/images/soa-domains/ExposeSOAMST3.png new file mode 100644 index 000000000..119d72c67 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/ExposeSOAMST3.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_OSB_Deploy_Success_Status.png b/docs/22.2.1/images/soa-domains/JDEV_OSB_Deploy_Success_Status.png new file mode 100644 index 000000000..26adedcf5 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_OSB_Deploy_Success_Status.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_OSB_Deploying_Progress.png b/docs/22.2.1/images/soa-domains/JDEV_OSB_Deploying_Progress.png new file mode 100644 index 000000000..c834a7852 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_OSB_Deploying_Progress.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_OSB_Deployment_Start.png b/docs/22.2.1/images/soa-domains/JDEV_OSB_Deployment_Start.png new file mode 100644 index 000000000..e15c9d7a4 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_OSB_Deployment_Start.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_OSB_Deployment_Summary.jpg b/docs/22.2.1/images/soa-domains/JDEV_OSB_Deployment_Summary.jpg new file mode 100644 index 000000000..303ab5151 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_OSB_Deployment_Summary.jpg differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_OSB_Deployment_Summary.png b/docs/22.2.1/images/soa-domains/JDEV_OSB_Deployment_Summary.png new file mode 100644 index 000000000..303ab5151 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_OSB_Deployment_Summary.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_OSB_Select_Application_Server.png b/docs/22.2.1/images/soa-domains/JDEV_OSB_Select_Application_Server.png new file mode 100644 index 000000000..1ea311e9c Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_OSB_Select_Application_Server.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_OSB_Select_Deployment_Action.png b/docs/22.2.1/images/soa-domains/JDEV_OSB_Select_Deployment_Action.png new file mode 100644 index 000000000..ae3fecbf8 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_OSB_Select_Deployment_Action.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_OSB_servicebus_launch_test_console.png 
b/docs/22.2.1/images/soa-domains/JDEV_OSB_servicebus_launch_test_console.png new file mode 100644 index 000000000..a01fe9f55 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_OSB_servicebus_launch_test_console.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_Reference_Config_Settings.png b/docs/22.2.1/images/soa-domains/JDEV_Reference_Config_Settings.png new file mode 100644 index 000000000..549fa9390 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_Reference_Config_Settings.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_SOA_Deploy_Configuration.png b/docs/22.2.1/images/soa-domains/JDEV_SOA_Deploy_Configuration.png new file mode 100644 index 000000000..8d4320237 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_SOA_Deploy_Configuration.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_SOA_Deploy_Success_Status.png b/docs/22.2.1/images/soa-domains/JDEV_SOA_Deploy_Success_Status.png new file mode 100644 index 000000000..84a132f0b Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_SOA_Deploy_Success_Status.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_SOA_Deploying_Progress.png b/docs/22.2.1/images/soa-domains/JDEV_SOA_Deploying_Progress.png new file mode 100644 index 000000000..ef49c887c Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_SOA_Deploying_Progress.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_SOA_Deployment_Start.png b/docs/22.2.1/images/soa-domains/JDEV_SOA_Deployment_Start.png new file mode 100644 index 000000000..45cb532c7 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_SOA_Deployment_Start.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_SOA_Deployment_Summary.png b/docs/22.2.1/images/soa-domains/JDEV_SOA_Deployment_Summary.png new file mode 100644 index 000000000..02ac26fdc Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_SOA_Deployment_Summary.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_SOA_Select_Application_Server.png b/docs/22.2.1/images/soa-domains/JDEV_SOA_Select_Application_Server.png new file mode 100644 index 000000000..f751e0c56 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_SOA_Select_Application_Server.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_SOA_Select_Deployment_Action.png b/docs/22.2.1/images/soa-domains/JDEV_SOA_Select_Deployment_Action.png new file mode 100644 index 000000000..a4eaf58fb Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_SOA_Select_Deployment_Action.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_SOA_Server_Lookup.png b/docs/22.2.1/images/soa-domains/JDEV_SOA_Server_Lookup.png new file mode 100644 index 000000000..fae2f2378 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_SOA_Server_Lookup.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_SOA_Target_soa_servers.png b/docs/22.2.1/images/soa-domains/JDEV_SOA_Target_soa_servers.png new file mode 100644 index 000000000..5b8cd53f6 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_SOA_Target_soa_servers.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_SOA_soainfra_server1.png b/docs/22.2.1/images/soa-domains/JDEV_SOA_soainfra_server1.png new file mode 100644 index 000000000..9f225df7c Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_SOA_soainfra_server1.png differ diff --git a/docs/22.2.1/images/soa-domains/JDEV_SOA_soainfra_server2.png 
b/docs/22.2.1/images/soa-domains/JDEV_SOA_soainfra_server2.png new file mode 100644 index 000000000..4e5e1f768 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/JDEV_SOA_soainfra_server2.png differ diff --git a/docs/22.2.1/images/soa-domains/SOA_Composites_Deploy_using_Jdev.png b/docs/22.2.1/images/soa-domains/SOA_Composites_Deploy_using_Jdev.png new file mode 100644 index 000000000..baaf4f82b Binary files /dev/null and b/docs/22.2.1/images/soa-domains/SOA_Composites_Deploy_using_Jdev.png differ diff --git a/docs/22.2.1/images/soa-domains/custIdentity-custTrust-keystores.png b/docs/22.2.1/images/soa-domains/custIdentity-custTrust-keystores.png new file mode 100644 index 000000000..290492743 Binary files /dev/null and b/docs/22.2.1/images/soa-domains/custIdentity-custTrust-keystores.png differ diff --git a/docs/22.2.1/images/soasuite-logo.png b/docs/22.2.1/images/soasuite-logo.png new file mode 100644 index 000000000..347663f5b Binary files /dev/null and b/docs/22.2.1/images/soasuite-logo.png differ diff --git a/docs/22.2.1/index.html b/docs/22.2.1/index.html new file mode 100644 index 000000000..869dbd7a5 --- /dev/null +++ b/docs/22.2.1/index.html @@ -0,0 +1,3698 @@ + + + + + + + + + + + + Oracle Fusion Middleware on Kubernetes :: Oracle Fusion Middleware on Kubernetes + + + + + + + + + + + + + + + + + + + + + + + + + +
[index.html markup stripped; recoverable page text follows]
Oracle Fusion Middleware on Kubernetes
Oracle supports the deployment of the following Oracle Fusion Middleware products on Kubernetes. Click on the appropriate document link below to get started on setting up the product.
Oracle Access Management
The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM). Follow the instructions in this guide to set up these Oracle Access Management domains on Kubernetes.
Oracle Identity Governance
The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance. Follow the instructions in this guide to set up Oracle Identity Governance domains on Kubernetes.
Oracle Unified Directory
Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management
Oracle Unified Directory Services Manager
Oracle Unified Directory Services Manager provides an interface for managing instances of Oracle Unified Directory
Oracle Internet Directory
Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management
+ + + + + + + + + + + + + + + + + + + diff --git a/docs/22.2.1/index.json b/docs/22.2.1/index.json new file mode 100644 index 000000000..639a62b49 --- /dev/null +++ b/docs/22.2.1/index.json @@ -0,0 +1,617 @@ +[ +{ + "uri": "/fmw-kubernetes/22.2.1/", + "title": "Oracle Fusion Middleware on Kubernetes", + "tags": [], + "description": "This document lists all the Oracle Fusion Middleware products deployment supported on Kubernetes.", + "content": "Oracle Fusion Middleware on Kubernetes Oracle supports the deployment of the following Oracle Fusion Middleware products on Kubernetes. Click on the appropriate document link below to get started on setting up the product.\n Oracle Access Management The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM). Follow the instructions in this guide to set up these Oracle Access Management domains on Kubernetes.\n Oracle Identity Governance The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance. Follow the instructions in this guide to set up Oracle Identity Governance domains on Kubernetes.\n Oracle Unified Directory Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management\n Oracle Unified Directory Services Manager Oracle Unified Directory Services Manager provides an interface for managing instances of Oracle Unified Directory\n Oracle Internet Directory Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/", + "title": "a. Using Design Console with NGINX(non-SSL)", + "tags": [], + "description": "Configure Design Console with NGINX(non-SSL).", + "content": "Configure an NGINX ingress (non-SSL) to allow Design Console to connect to your Kubernetes cluster.\n Prerequisites\n Setup routing rules for the Design Console ingress\n Create the ingress\n Update the T3 channel\n Restart the OIG Managed Server\n Design Console client\na. Using an on-premises installed Design Console\nb. Using a container image for Design Console\n Login to the Design Console\n Prerequisites If you haven\u0026rsquo;t already configured an NGINX ingress controller (Non-SSL) for OIG, follow Using an Ingress with NGINX (non-SSL).\nMake sure you know the master hostname and ingress port for NGINX before proceeding e.g http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}.\nNote: In all steps below if you are using a load balancer for your ingress instead of NodePort then replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with `${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.\nSetup routing rules for the Design Console ingress Setup routing rules by running the following commands:\n$ cd $WORKDIR/kubernetes/design-console-ingress Edit values.yaml and ensure that tls: NONSSL and domainUID: governancedomain are set, for example:\n# Load balancer type. 
Supported values are: NGINX type: NGINX # Type of Configuration Supported Values are : NONSSL,SSL # tls: NONSSL tls: NONSSL # TLS secret name if the mode is SSL secretName: dc-tls-cert # WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain oimClusterName: oim_cluster oimServerT3Port: 14002 Create the ingress Run the following command to create the ingress:\n$ cd $WORKDIR $ helm install governancedomain-nginx-designconsole kubernetes/design-console-ingress --namespace oigns --values kubernetes/design-console-ingress/values.yaml For example:\nThe output will look similar to the following:\nNAME: governancedomain-nginx-designconsole LAST DEPLOYED: Thu Mar 10 14:32:16 2022 NAMESPACE: oigns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl describe ing governancedomain-nginx-designconsole -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe ing governancedomain-nginx-designconsole -n oigns The output will look similar to the following:\nName: governancedomain-nginx-designconsole Namespace: oigns Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * governancedomain-cluster-oim-cluster:14002 (10.244.1.25:14002) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx-designconsole meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/enable-access-log: false Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 13s nginx-ingress-controller Scheduled for sync Update the T3 channel Log in to the WebLogic Console using http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console.\n Navigate to Environment, click Servers, and then select oim_server1.\n Click Protocols, and then Channels.\n Click the default T3 channel called T3Channel.\n Click Lock and Edit.\n Set the External Listen Address to the ingress controller hostname ${MASTERNODE-HOSTNAME}.\n Set the External Listen Port to the ingress controller port ${MASTERNODE-PORT}.\n Click Save.\n Click Activate Changes.\n Restart the OIG Managed Server Restart the OIG Managed Server for the above changes to take effect:\n$ cd $WORKDIR/kubernetes/domain-lifecycle $ ./restartServer.sh -s oim_server1 -d \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/domain-lifecycle ./restartServer.sh -s oim_server1 -d governancedomain -n oigns Make sure the \u0026lt;domain_uid\u0026gt;-oim-server1 has a READY status of 1/1 before continuing:\n$ kubectl get pods -n oigns | grep oim-server1 The output will look similar to the following:\ngovernancedomain-oim-server1 1/1 Running 0 8m Design Console client It is possible to use Design Console from an on-premises install, or from a container image.\nUsing an on-premises installed Design Console Install Design Console on an on-premises machine\n Follow Login to the Design Console.\n Using a container image for Design Console Using Docker The Design Console can be run from a container using X windows emulation.\n On the parent machine where the Design Console is to be displayed, run xhost +.\n Find which worker node the \u0026lt;domain\u0026gt;-oim-server1 pod is running. 
For example:\n$ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1 The output will look similar to the following:\ngovernancedomain-oim-server1 1/1 Running 0 31m 10.244.2.98 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; On the worker node returned above e.g worker-node2, execute the following command to find the OIG container image name:\n$ docker images Then execute the following command to start a container to run Design Console:\n$ docker run -u root --name oigdcbase -it \u0026lt;image\u0026gt; bash For example:\n$ docker run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-220120.1359 bash This will take you into a bash shell inside the container:\nbash-4.2# Inside the container set the proxy, for example:\nbash-4.2# export https_proxy=http://proxy.example.com:80 Install the relevant X windows packages in the container:\nbash-4.2# yum install libXext libXrender libXtst Execute the following outside the container to create a new Design Console image from the container:\n$ docker commit \u0026lt;container_name\u0026gt; \u0026lt;design_console_image_name\u0026gt; For example:\n$ docker commit oigdcbase oigdc Exit the container bash session:\nbash-4.2# exit Start a new container using the Design Console image:\n$ docker run --name oigdc -it oigdc /bin/bash This will take you into a bash shell for the container:\nbash-4.2# In the container run the following to export the DISPLAY:\n$ export DISPLAY=\u0026lt;parent_machine_hostname:1\u0026gt; Start the Design Console from the container:\nbash-4.2# cd idm/designconsole bash-4.2# sh xlclient.sh The Design Console login should be displayed. Now follow Login to the Design Console.\n Using podman On the parent machine where the Design Console is to be displayed, run xhost +.\n Find which worker node the \u0026lt;domain\u0026gt;-oim-server1 pod is running. 
For example:\n$ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1 The output will look similar to the following:\ngovernancedomain-oim-server1 1/1 Running 0 31m 10.244.2.98 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; On the worker node returned above e.g worker-node2, execute the following command to find the OIG container image name:\n$ podman images Then execute the following command to start a container to run Design Console:\n$ podman run -u root --name oigdcbase -it \u0026lt;image\u0026gt; bash For example:\n$ podman run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-220120.1359 bash This will take you into a bash shell inside the container:\nbash-4.2# Inside the container set the proxy, for example:\nbash-4.2# export https_proxy=http://proxy.example.com:80 Install the relevant X windows packages in the container:\nbash-4.2# yum install libXext libXrender libXtst Execute the following outside the container to create a new Design Console image from the container:\n$ podman commit \u0026lt;container_name\u0026gt; \u0026lt;design_console_image_name\u0026gt; For example:\n$ podman commit oigdcbase oigdc Exit the container bash session:\nbash-4.2# exit Start a new container using the Design Console image:\n$ podman run --name oigdc -it oigdc /bin/bash This will take you into a bash shell for the container:\nbash-4.2# In the container run the following to export the DISPLAY:\n$ export DISPLAY=\u0026lt;parent_machine_hostname:1\u0026gt; Start the Design Console from the container:\nbash-4.2# cd idm/designconsole bash-4.2# sh xlclient.sh The Design Console login should be displayed. Now follow Login to the Design Console.\n Login to the Design Console Launch the Design Console and in the Oracle Identity Manager Design Console login page enter the following details:\nEnter the following details and click Login:\n Server URL: \u0026lt;url\u0026gt; User ID: xelsysadm Password: \u0026lt;password\u0026gt;. 
where \u0026lt;url\u0026gt; is http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}\n If successful the Design Console will be displayed.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/manage-oig-domains/domain-lifecycle/", + "title": "Domain life cycle", + "tags": [], + "description": "Learn about the domain life cyle of an OIG domain.", + "content": " View existing OIG servers Starting/Scaling up OIG Managed servers Stopping/Scaling down OIG Managed servers Stopping and starting the Administration Server and Managed Servers Domain lifecycle sample scripts As OIG domains use the WebLogic Kubernetes Operator, domain lifecyle operations are managed using the WebLogic Kubernetes Operator itself.\nThis document shows the basic operations for starting, stopping and scaling servers in the OIG domain.\nFor more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation.\nDo not use the WebLogic Server Administration Console or Oracle Enterprise Manager Console to start or stop servers.\n View existing OIG Servers The default OIG deployment starts the Administration Server (AdminServer), one OIG Managed Server (oim_server1) and one SOA Managed Server (soa_server1).\nThe deployment also creates, but doesn\u0026rsquo;t start, four extra OIG Managed Servers (oim-server2 to oim-server5) and four more SOA Managed Servers (soa_server2 to soa_server5).\nAll these servers are visible in the WebLogic Server Administration Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console by navigating to Domain Structure \u0026gt; governancedomain \u0026gt; Environment \u0026gt; Servers.\nTo view the running servers using kubectl, run the following command:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-soa-server1 1/1 Running 0 23h Starting/Scaling up OIG Managed Servers The number of OIG Managed Servers running is dependent on the replicas parameter configured for the cluster. To start more OIG Managed Servers perform the following steps:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain governancedomain -n oigns Note: This opens an edit session for the domain where parameters can be changed using standard vi commands.\n In the edit session search for clusterName: oim_cluster and look for the replicas parameter. By default the replicas parameter is set to \u0026ldquo;1\u0026rdquo; hence a single OIG Managed Server is started (oim_server1):\n - clusterName: oim_cluster replicas: 1 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) To start more OIG Managed Servers, increase the replicas value as desired. 
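Note: If you prefer to script this change rather than use an interactive edit session, the same update can be applied with kubectl patch. This is a sketch only; it assumes oim_cluster is the first entry (index 0) under spec.clusters in the domain resource, so check the index in your domain before running it:
$ kubectl patch domain governancedomain -n oigns --type=json -p '[{"op": "replace", "path": "/spec/clusters/0/replicas", "value": 2}]'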
In the example below, one more Managed Server will be started by setting replicas to \u0026ldquo;2\u0026rdquo;:\n - clusterName: oim_cluster replicas: 2 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) Save the file and exit (:wq)\nThe output will look similar to the following:\ndomain.weblogic.oracle/governancedomain edited Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-oim-server2 0/1 Running 0 7s governancedomain-soa-server1 1/1 Running 0 23h One new pod (governancedomain-oim-server2) is started, but currently has a READY status of 0/1. This means oim_server2 is not currently running but is in the process of starting. The server will take several minutes to start so keep executing the command until READY shows 1/1:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-oim-server2 1/1 Running 0 5m27s governancedomain-soa-server1 1/1 Running 0 23h Note: To check what is happening during server startup when READY is 0/1, run the following command to view the log of the pod that is starting:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl logs governancedomain-oim-server2 -n oigns Stopping/Scaling down OIG Managed Servers As mentioned in the previous section, the number of OIG Managed Servers running is dependent on the replicas parameter configured for the cluster. To stop one or more OIG Managed Servers, perform the following:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain governancedomain -n oigns In the edit session search for clusterName: oim_cluster and look for the replicas parameter. In the example below replicas is set to \u0026ldquo;2\u0026rdquo; hence two OIG Managed Servers are started (oim_server1 and oim_server2):\n - clusterName: oim_cluster replicas: 2 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) To stop OIG Managed Servers, decrease the replicas value as desired. 
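Note: If you want to confirm the current replicas value before editing, a quick check (a sketch only; adjust the domain name and namespace for your environment) is to print the cluster entries from the domain resource:
$ kubectl get domain governancedomain -n oigns -o yaml | grep -A1 clusterName
The replicas value for each cluster is shown on the line following its clusterName.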
In the example below, we will stop one Managed Server by setting replicas to \u0026ldquo;1\u0026rdquo;:\n - clusterName: oim_cluster replicas: 1 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) Save the file and exit (:wq)\n Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-oim-server2 1/1 Terminating 0 7m30s governancedomain-soa-server1 1/1 Running 0 23h The exiting pod shows a STATUS of Terminating (governancedomain-oim-server2). The server may take a minute or two to stop, so keep executing the command until the pod has disappeared:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 23h governancedomain-soa-server1 1/1 Running 0 23h Stopping and Starting the Administration Server and Managed Servers To stop all the OIG Managed Servers and the Administration Server in one operation:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain governancedomain -n oigns In the edit session search for serverStartPolicy: IF_NEEDED:\n volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc serverStartPolicy: IF_NEEDED Change serverStartPolicy: IF_NEEDED to NEVER as follows:\n volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc serverStartPolicy: NEVER Save the file and exit (:wq).\n Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Terminating 0 23h governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Terminating 0 23h governancedomain-soa-server1 1/1 Terminating 0 23h The AdminServer pod and Managed Server pods will move to a STATUS of Terminating. 
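Note: Instead of re-running the command manually, you can watch the pods shut down as it happens and press Ctrl+C once the Managed Server pod has gone:
$ kubectl get pods -n oigns -w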
After a few minutes, run the command again and the pods should have disappeared:\nNAME READY STATUS RESTARTS AGE governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h To start the Administration Server and Managed Servers up again, repeat the previous steps but change serverStartPolicy: NEVER to IF_NEEDED as follows:\n volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc serverStartPolicy: IF_NEEDED Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 0/1 Running 0 4s governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h The Administration Server pod will start followed by the OIG Managed Server pods. This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 6m57s governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 4m33s governancedomain-soa-server1 1/1 Running 0 4m33s Domain lifecycle sample scripts The WebLogic Kubernetes Operator provides sample scripts to start up or shut down a specific Managed Server or cluster in a deployed domain, or the entire deployed domain.\nNote: Prior to running these scripts, you must have previously created and deployed the domain.\nThe scripts are located in the $WORKDIR/kubernetes/domain-lifecycle directory. For more information, see the README.\n" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Access Management on Kubernetes.\nRecent changes Date Version Change April, 2022 22.2.1 Updated for CRI-O support. November, 2021 21.4.2 Supports Oracle Access Management domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Additional post configuration tasks added. D) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. November 2020 20.4.1 Initial release of Oracle Access Management on Kubernetes. " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oid/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Internet Directory on Kubernetes.\nRecent changes Date Version Change April, 2022 22.2.1 Updated for CRI-O support. October, 2021 21.4.1 Initial release of Oracle Internet Directory on Kubernetes. " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Identity Governance on Kubernetes.\nRecent changes Date Version Change April, 2022 22.2.1 Updated for CRI-O support. 
November, 2021 21.4.2 Supports Oracle Identity Governance domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Additional post configuration tasks added. D) New section on how to start Design Console in a container. E) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. November 2020 20.4.1 Initial release of Oracle Identity Governance on Kubernetes. " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oud/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Unified Directory on Kubernetes.\nRecent changes Date Version Change April, 2022 22.2.1 Updated for CRI-O support. November 2021 21.4.2 Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. November 2020 20.4.1 Initial release of Oracle Unified Directory on Kubernetes. " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oudsm/release-notes/", + "title": "Release Notes", + "tags": [], + "description": "", + "content": "Review the latest changes and known issues for Oracle Unified Directory Services Manager on Kubernetes.\nRecent changes Date Version Change April, 2022 22.2.1 Updated for CRI-O support. November 2021 21.4.2 Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. C) Upgrading a Kubernetes Cluster and Security Hardening removed as vendor specific. November 2020 20.4.1 Initial release of Oracle Unified Directory Services Manager on Kubernetes. " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/configure-design-console/using-the-design-console-with-nginx-ssl/", + "title": "b. Using Design Console with NGINX(SSL)", + "tags": [], + "description": "Configure Design Console with NGINX(SSL).", + "content": "Configure an NGINX ingress (SSL) to allow Design Console to connect to your Kubernetes cluster.\n Prerequisites\n Setup routing rules for the Design Console ingress\n Create the ingress\n Update the T3 channel\n Restart the OIG Managed Server\n Design Console client\na. Using an on-premises installed Design Console\nb. Using a container image for Design Console\n Login to the Design Console\n Prerequisites If you haven\u0026rsquo;t already configured an NGINX ingress controller (SSL) for OIG, follow Using an Ingress with NGINX (SSL).\nMake sure you know the master hostname and ingress port for NGINX before proceeding e.g https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}. 
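If you need to confirm the ingress port, one way is to list the NGINX ingress controller service and note the NodePort that maps to port 443. This is a sketch only; the nginxssl namespace below is the example value used when installing the ingress controller and may differ in your environment:
$ kubectl get svc -n nginxssl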
Also make sure you know the Kubernetes secret for SSL that was generated e.g governancedomain-tls-cert.\nSetup routing rules for the Design Console ingress Setup routing rules by running the following commands:\n$ cd $WORKDIR/kubernetes/design-console-ingress Edit values.yaml and ensure that tls: SSL is set. Change domainUID: and secretName: to match the values for your \u0026lt;domain_uid\u0026gt; and your SSL Kubernetes secret, for example:\n# Load balancer type. Supported values are: NGINX type: NGINX # Type of Configuration Supported Values are : NONSSL,SSL # tls: NONSSL tls: SSL # TLS secret name if the mode is SSL secretName: governancedomain-tls-cert # WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain oimClusterName: oim_cluster oimServerT3Port: 14002 Create the ingress Run the following command to create the ingress:\n$ cd $WORKDIR $ helm install governancedomain-nginx-designconsole kubernetes/design-console-ingress --namespace oigns --values kubernetes/design-console-ingress/values.yaml The output will look similar to the following:\nNAME: governancedomain-nginx-designconsole Mon Thu Mar 10 14:42:16 2022 NAMESPACE: oigns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl describe ing governancedomain-nginx-designconsole -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe ing governancedomain-nginx-designconsole -n oigns The output will look similar to the following:\nName: governancedomain-nginx-designconsole Namespace: oigns Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * governancedomain-cluster-oim-cluster:14002 (10.244.2.103:14002) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx-designconsole meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/configuration-snippet: more_set_input_headers \u0026quot;X-Forwarded-Proto: https\u0026quot;; more_set_input_headers \u0026quot;WL-Proxy-SSL: true\u0026quot;; nginx.ingress.kubernetes.io/enable-access-log: false nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 6s nginx-ingress-controller Scheduled for sync Update the T3 channel Log in to the WebLogic Console using https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console.\n Navigate to Environment, click Servers, and then select oim_server1.\n Click Protocols, and then Channels.\n Click the default T3 channel called T3Channel.\n Click Lock and Edit.\n Set the External Listen Address to the ingress controller hostname ${MASTERNODE-HOSTNAME}.\n Set the External Listen Port to the ingress controller port ${MASTERNODE-PORT}.\n Click Save.\n Click Activate Changes.\n Restart the OIG Managed Server Restart the OIG Managed Server for the above changes to take effect:\n$ cd $WORKDIR/kubernetes/domain-lifecycle $ ./restartServer.sh -s oim_server1 -d \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/domain-lifecycle ./restartServer.sh -s oim_server1 -d governancedomain -n oigns Make sure the \u0026lt;domain_uid\u0026gt;-oim-server1 has a READY status of 1/1 before continuing:\n$ kubectl get pods -n oigns | grep oim-server1 The output will 
look similar to the following:\ngovernancedomain-oim-server1 1/1 Running 0 8m Design Console Client It is possible to use Design Console from an on-premises install, or from a container image.\nUsing an on-premises installed Design Console The instructions below should be performed on the client where Design Console is installed.\n Import the CA certificate into the java keystore\nIf in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must import the CA certificate (e.g cacert.crt) that signed your certificate, into the java truststore used by Design Console.\nIf in Generate SSL Certificate you generated a self-signed certicate (e.g tls.crt), you must import the self-signed certificate into the java truststore used by Design Console.\nImport the certificate using the following command:\n$ keytool -import -trustcacerts -alias dc -file \u0026lt;certificate\u0026gt; -keystore $JAVA_HOME/jre/lib/security/cacerts where \u0026lt;certificate\u0026gt; is the CA certificate, or self-signed certicate.\n Once complete follow Login to the Design Console.\n Using a container image for Design Console Using Docker The Design Console can be run from a container using X windows emulation.\n On the parent machine where the Design Console is to be displayed, run xhost +.\n Find which worker node the \u0026lt;domain\u0026gt;-oim-server1 pod is running. For example:\n$ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1 The output will look similar to the following:\ngovernancedomain-oim-server1 1/1 Running 0 31m 10.244.2.98 worker-node2 On the worker node returned above e.g worker-node2, execute the following command to find the OIG container image name:\n$ docker images Then execute the following command to start a container to run Design Console:\n$ docker run -u root --name oigdcbase -it \u0026lt;image\u0026gt; bash For example:\n$ docker run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-220120.1359 bash This will take you into a bash shell inside the container:\nbash-4.2# Inside the container set the proxy, for example:\nbash-4.2# export https_proxy=http://proxy.example.com:80 Install the relevant X windows packages in the container:\nbash-4.2# yum install libXext libXrender libXtst Execute the following outside the container to create a new Design Console image from the container:\n$ docker commit \u0026lt;container_name\u0026gt; \u0026lt;design_console_image_name\u0026gt; For example:\n$ docker commit oigdcbase oigdc Exit the container bash session:\nbash-4.2# exit Start a new container using the Design Console image:\n$ docker run --name oigdc -it oigdc /bin/bash This will take you into a bash shell for the container:\nbash-4.2# Copy the Ingress CA certificate into the container\nIf in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must copy the CA certificate (e.g cacert.crt) that signed your certificate, into the container\nIf in Generate SSL Certificate you generated a self-signed certicate (e.g tls.crt), you must copy the self-signed certificate into the container\nNote: You will have to copy the certificate over to the worker node where the oigdc image is created before running the following.\nRun the following command outside the container:\n$ cd \u0026lt;workdir\u0026gt;/ssl $ docker cp \u0026lt;certificate\u0026gt; \u0026lt;container_name\u0026gt;:/u01/jdk/jre/lib/security/\u0026lt;certificate\u0026gt; For example:\n$ cd 
/scratch/OIGK8S/ssl $ docker cp tls.crt oigdc:/u01/jdk/jre/lib/security/tls.crt Import the certificate using the following command:\nbash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/\u0026lt;certificate\u0026gt; -keystore /u01/jdk/jre/lib/security/cacerts For example:\nbash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/tls.crt -keystore /u01/jdk/jre/lib/security/cacerts In the container run the following to export the DISPLAY:\n$ export DISPLAY=\u0026lt;parent_machine_hostname:1\u0026gt; Start the Design Console from the container:\nbash-4.2# cd idm/designconsole bash-4.2# sh xlclient.sh The Design Console login should be displayed. Now follow Login to the Design Console.\n Using podman On the parent machine where the Design Console is to be displayed, run xhost +.\n Find which worker node the \u0026lt;domain\u0026gt;-oim-server1 pod is running. For example:\n$ kubectl get pods -n oigns -o wide | grep governancedomain-oim-server1 The output will look similar to the following:\ngovernancedomain-oim-server1 1/1 Running 0 19h 10.244.2.55 worker-node2 \u0026lt;none\u0026gt; On the worker node returned above e.g worker-node2, execute the following command to find the OIG container image name:\n$ podman images Then execute the following command to start a container to run Design Console:\n$ podman run -u root --name oigdcbase -it \u0026lt;image\u0026gt; bash For example:\n$ podman run -u root -it --name oigdcbase container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-220120.1359 bash This will take you into a bash shell inside the container:\nbash-4.2# Inside the container set the proxy, for example:\nbash-4.2# export https_proxy=http://proxy.example.com:80 Install the relevant X windows packages in the container:\nbash-4.2# yum install libXext libXrender libXtst Execute the following outside the container to create a new Design Console image from the container:\n$ podman commit \u0026lt;container_name\u0026gt; \u0026lt;design_console_image_name\u0026gt; For example:\n$ podman commit oigdcbase oigdc Exit the container bash session:\nbash-4.2# exit Start a new container using the Design Console image:\n$ podman run --name oigdc -it oigdc /bin/bash This will take you into a bash shell for the container:\nbash-4.2# Copy the Ingress CA certificate into the container\nIf in Generate SSL Certificate you requested a certificate from a Certificate Authority (CA), then you must copy the CA certificate (e.g cacert.crt) that signed your certificate, into the container\nIf in Generate SSL Certificate you generated a self-signed certicate (e.g tls.crt), you must copy the self-signed certificate into the container\nNote: You will have to copy the certificate over to the worker node where the oigdc image is created before running the following.\nRun the following command outside the container:\n$ cd \u0026lt;workdir\u0026gt;/ssl $ podman cp \u0026lt;certificate\u0026gt; \u0026lt;container_name\u0026gt;:/u01/jdk/jre/lib/security/\u0026lt;certificate\u0026gt; For example:\n$ cd /scratch/OIGK8S/ssl $ podman cp tls.crt oigdc:/u01/jdk/jre/lib/security/tls.crt Inside the container, import the certificate using the following command:\nbash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/\u0026lt;certificate\u0026gt; -keystore /u01/jdk/jre/lib/security/cacerts For example:\nbash-4.2# /u01/jdk/bin/keytool -import -trustcacerts -alias dc -file /u01/jdk/jre/lib/security/tls.crt 
-keystore /u01/jdk/jre/lib/security/cacerts In the container run the following to export the DISPLAY:\n$ export DISPLAY=\u0026lt;parent_machine_hostname:1\u0026gt; Start the Design Console from the container:\nbash-4.2# cd idm/designconsole bash-4.2# sh xlclient.sh The Design Console login should be displayed. Now follow Login to the Design Console.\n Login to the Design Console Launch the Design Console and in the Oracle Identity Manager Design Console login page enter the following details:\nEnter the following details and click Login:\n Server URL: \u0026lt;url\u0026gt; User ID: xelsysadm Password: \u0026lt;password\u0026gt;. where \u0026lt;url\u0026gt; is https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}.\n If successful the Design Console will be displayed.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/", + "title": "Oracle Access Management", + "tags": [], + "description": "The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM). Follow the instructions in this guide to set up these Oracle Access Management domains on Kubernetes.", + "content": "The WebLogic Kubernetes Operator supports deployment of Oracle Access Management (OAM).\nIn this release, OAM domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV).\nThe WebLogic Kubernetes Operator has several key features to assist you with deploying and managing Oracle Access Management domains in a Kubernetes environment. You can:\n Create OAM instances in a Kubernetes persistent volume. This persistent volume can reside in an NFS file system or other Kubernetes volume types. Start servers based on declarative startup parameters and desired states. Expose the OAM Services for external access. Scale OAM domains by starting and stopping Managed Servers on demand. Publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. Monitor the OAM instance using Prometheus and Grafana. Current production release The current production release for the Oracle Access Management domain deployment on Kubernetes is 22.2.1. This release uses the WebLogic Kubernetes Operator version 3.3.0.\nThis release of the documentation can also be used for 3.1.X and 3.2.0 WebLogic Kubernetes Operator. For 3.0.X WebLogic Kubernetes Operator refer to Version 21.4.1\nRecent changes and known issues See the Release Notes for recent changes and known issues for Oracle Access Management domain deployment on Kubernetes.\nLimitations See here for limitations in this release.\nGetting started This documentation explains how to configure OAM on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially.\nIf performing an Enterprise Deployment where multiple Oracle Identity Management products are deployed, refer to the Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster instead.\nDocumentation for earlier releases To view documentation for an earlier release, see:\n Version 21.4.2 Version 21.4.1 " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/", + "title": "Oracle Identity Governance", + "tags": [], + "description": "The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance. 
Follow the instructions in this guide to set up Oracle Identity Governance domains on Kubernetes.", + "content": "The WebLogic Kubernetes Operator supports deployment of Oracle Identity Governance (OIG).\nIn this release, OIG domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV).\nThe operator has several key features to assist you with deploying and managing OIG domains in a Kubernetes environment. You can:\n Create OIG instances in a Kubernetes persistent volume. This persistent volume can reside in an NFS file system or other Kubernetes volume types. Start servers based on declarative startup parameters and desired states. Expose the OIG Services for external access. Scale OIG domains by starting and stopping Managed Servers on demand. Publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. Monitor the OIG instance using Prometheus and Grafana. Current production release The current production release for the Oracle Identity Governance domain deployment on Kubernetes is 22.2.1. This release uses the WebLogic Kubernetes Operator version 3.3.0.\nThis release of the documentation can also be used for 3.1.X and 3.2.0 WebLogic Kubernetes Operator. For 3.0.X WebLogic Kubernetes Operator refer to Version 21.4.1\nRecent changes and known issues See the Release Notes for recent changes and known issues for Oracle Identity Governance domain deployment on Kubernetes.\nLimitations See here for limitations in this release.\nGetting started This documentation explains how to configure OIG on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially.\nIf performing an Enterprise Deployment, refer to the Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster instead.\nDocumentation for earlier releases To view documentation for an earlier release, see:\n Version 21.4.2 Version 21.4.1 " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oud/", + "title": "Oracle Unified Directory", + "tags": [], + "description": "Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management", + "content": "Oracle Unified Directory provides a comprehensive Directory Solution for robust Identity Management. Oracle Unified Directory is an all-in-one directory solution with storage, proxy, synchronization and virtualization capabilities. While unifying the approach, it provides all the services required for high-performance Enterprise and carrier-grade environments. Oracle Unified Directory ensures scalability to billions of entries, ease of installation, elastic deployments, enterprise manageability and effective monitoring.\nThis project supports deployment of Oracle Unified Directory (OUD) container images based on the 12cPS4 (12.2.1.4.0) release within a Kubernetes environment. The OUD container image refers to binaries for OUD Release 12.2.1.4.0 and it has the capability to create different types of OUD Instances (Directory Service, Proxy, Replication) in containers.\nThis project has several key features to assist you with deploying and managing Oracle Unified Directory in a Kubernetes environment. You can:\n Create Oracle Unified Directory instances in a Kubernetes persistent volume (PV). This PV can reside in an NFS file system or other Kubernetes volume types. 
Start servers based on declarative startup parameters and desired states. Expose the Oracle Unified Directory services for external access. Scale Oracle Unified Directory by starting and stopping servers on demand. Monitor the Oracle Unified Directory instance using Prometheus and Grafana. Current production release The current production release for the Oracle Unified Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is 22.2.1.\nRecent changes and known issues See the Release Notes for recent changes and known issues for Oracle Unified Directory deployment on Kubernetes.\nGetting started This documentation explains how to configure OUD on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially.\nIf performing an Enterprise Deployment, refer to the Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster instead.\nDocumentation for earlier releases To view documentation for an earlier release, see:\n Version 21.4.2 Version 21.4.1 " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oudsm/", + "title": "Oracle Unified Directory Services Manager", + "tags": [], + "description": "Oracle Unified Directory Services Manager provides an interface for managing instances of Oracle Unified Directory", + "content": "Oracle Unified Directory Services Manager (OUDSM) is an interface for managing instances of Oracle Unified Directory. Oracle Unified Directory Services Manager enables you to configure the structure of the directory, define objects in the directory, add and configure users, groups, and other entries. Oracle Unified Directory Services Manager is also the interface you use to manage entries, schema, security, and other directory features.\nThis project supports deployment of Oracle Unified Directory Services Manager images based on the 12cPS4 (12.2.1.4.0) release within a Kubernetes environment. The Oracle Unified Directory Services Manager Image refers to binaries for Oracle Unified Directory Services Manager Release 12.2.1.4.0.\nFollow the instructions in this guide to set up Oracle Unified Directory Services Manager on Kubernetes.\nCurrent production release The current production release for the Oracle Unified Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is 22.2.1.\nRecent changes and known issues See the Release Notes for recent changes and known issues for Oracle Unified Directory deployment on Kubernetes.\nGetting started This documentation explains how to configure OUDSM on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. 
For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially.\nIf performing an Enterprise Deployment, refer to the Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster instead.\nDocumentation for earlier releases To view documentation for an earlier release, see:\n Version 21.4.2 Version 21.4.1 " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "System requirements and limitations for deploying and running an OAM domain home", + "content": "Introduction This document provides information about the system requirements and limitations for deploying and running OAM domains with the WebLogic Kubernetes Operator 3.3.0.\nSystem requirements for oam domains A running Kubernetes cluster that meets the following requirements:\n The Kubernetes cluster must have sufficient nodes and resources. An installation of Helm is required on the Kubernetes cluster. Helm is used to create and deploy the necessary resources and run the WebLogic Kubernetes Operator in a Kubernetes cluster A supported container engine must be installed and running on the Kubernetes cluster. The Kubernetes cluster and container engine must meet the minimum version requirements outlined in document ID 2723908.1 on My Oracle Support. You must have the cluster-admin role to install the WebLogic Kubernetes Operator. The nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system. The system clocks on node of the Kubernetes cluster must be synchronized. Run the date command simultaneously on all the nodes in each cluster and then syncrhonize accordingly. A running Oracle Database 12.2.0.1 or later. The database must be a supported version for OAM as outlined in Oracle Fusion Middleware 12c certifications. It must meet the requirements as outlined in About Database Requirements for an Oracle Fusion Middleware Installation and in RCU Requirements for Oracle Databases.\n Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor specific documentation for this information.\nLimitations Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for OAM domains:\n In this release, OAM domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV).The \u0026ldquo;domain in image\u0026rdquo; model is not supported. Only configured clusters are supported. Dynamic clusters are not supported for OAM domains. Note that you can still use all of the scaling features, you just need to define the maximum size of your cluster at domain creation time. The WebLogic Monitoring Exporter currently supports the WebLogic MBean trees only. Support for JRF MBeans has not been added yet. We do not currently support running OAM in non-Linux containers. 
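A quick way to sanity check some of these prerequisites before proceeding is shown below. This is only a sketch: it confirms the nodes are in a Ready state, that Helm is installed, and that your account can create cluster level RBAC objects (an indication of cluster-admin rights):
$ kubectl get nodes
$ helm version
$ kubectl auth can-i create clusterrolebindings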
" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oid/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "Prerequisites for deploying and running Oracle Internet Directory in a Kubernetes environment.", + "content": "Introduction This document provides information about the system requirements for deploying and running Oracle Internet Directory 12c PS4 (12.2.1.4.0) in a Kubernetes environment.\nSystem Requirements for Oracle Internet Directory on Kubernetes A running Kubernetes cluster that meets the following requirements:\n The Kubernetes cluster must have sufficient nodes and resources. An installation of Helm is required on the Kubernetes cluster. Helm is used to create and deploy the necessary resources on the Kubernetes cluster. A supported container engine must be installed and running on the Kubernetes cluster. The Kubernetes cluster and container engine must meet the minimum version requirements outlined in document ID 2723908.1 on My Oracle Support. The nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system. A running Oracle Database 12.2.0.1 or later. The database must be a supported version for OID as outlined in Oracle Fusion Middleware 12c certifications. It must meet the requirements as outlined in About Database Requirements for an Oracle Fusion Middleware Installation and in RCU Requirements for Oracle Databases.\n Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor specific documentation for this information.\n" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "System requirements and limitations for deploying and running an OIG domain", + "content": "Introduction This document provides information about the system requirements and limitations for deploying and running OIG domains with the WebLogic Kubernetes Operator 3.3.0.\nSystem requirements for OIG domains A running Kubernetes cluster that meets the following requirements:\n The Kubernetes cluster must have sufficient nodes and resources. An installation of Helm is required on the Kubernetes cluster. Helm is used to create and deploy the necessary resources and run the WebLogic Kubernetes Operator in a Kubernetes cluster A supported container engine must be installed and running on the Kubernetes cluster. The Kubernetes cluster and container engine must meet the minimum version requirements outlined in document ID 2723908.1 on My Oracle Support. You must have the cluster-admin role to install the WebLogic Kubernetes Operator. The nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system. The system clocks on node of the Kubernetes cluster must be synchronized. Run the date command simultaneously on all the nodes in each cluster and then syncrhonize accordingly. A running Oracle Database 12.2.0.1 or later. The database must be a supported version for OIG as outlined in Oracle Fusion Middleware 12c certifications. 
It must meet the requirements as outlined in About Database Requirements for an Oracle Fusion Middleware Installation and in RCU Requirements for Oracle Databases.\n Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor specific documentation for this information.\nLimitations Compared to running a WebLogic Server domain in Kubernetes using the operator, the following limitations currently exist for OIG domains:\n In this release, OIG domains are supported using the “domain on a persistent volume” model only, where the domain home is located in a persistent volume (PV). The \u0026ldquo;domain in image\u0026rdquo; model is not supported. Only configured clusters are supported. Dynamic clusters are not supported for OIG domains. Note that you can still use all of the scaling features, you just need to define the maximum size of your cluster at domain creation time. The WebLogic Monitoring Exporter currently supports the WebLogic MBean trees only. Support for JRF MBeans has not been added yet. We do not currently support running OIG in non-Linux containers. " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oud/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "Oracle Unified Directory Prerequisites.", + "content": "Introduction This document provides information about the system requirements for deploying and running Oracle Unified Directory 12c PS4 (12.2.1.4.0) in a Kubernetes environment.\nSystem Requirements for Oracle Unified Directory on Kubernetes A running Kubernetes cluster that meets the following requirements: The Kubernetes cluster must have sufficient nodes and resources. An installation of Helm is required on the Kubernetes cluster. Helm is used to create and deploy the necessary resources on the Kubernetes cluster. A supported container engine must be installed and running on the Kubernetes cluster. The Kubernetes cluster and container engine must meet the minimum version requirements outlined in document ID 2723908.1 on My Oracle Support. The nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system. Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor specific documentation for this information.\n" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oudsm/prerequisites/", + "title": "Prerequisites", + "tags": [], + "description": "Oracle Unified Directory Services Manager Prerequisites.", + "content": "Introduction This document provides information about the system requirements for deploying and running Oracle Unified Directory Services Manager 12c PS4 (12.2.1.4.0) in a Kubernetes environment.\nSystem Requirements for Oracle Unified Directory Services Manager on Kubernetes A running Kubernetes cluster that meets the following requirements: The Kubernetes cluster must have sufficient nodes and resources. An installation of Helm is required on the Kubernetes cluster. Helm is used to create and deploy the necessary resources on the Kubernetes cluster. A supported container engine must be installed and running on the Kubernetes cluster. The Kubernetes cluster and container engine must meet the minimum version requirements outlined in document ID 2723908.1 on My Oracle Support. 
The nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system. Note: This documentation does not tell you how to install a Kubernetes cluster, Helm, the container engine, or how to push container images to a container registry. Please refer to your vendor specific documentation for this information.\n" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/manage-oig-domains/wlst-admin-operations/", + "title": "WLST administration operations", + "tags": [], + "description": "Describes the steps for WLST administration using helper pod running in the same Kubernetes Cluster as OIG Domain.", + "content": "Invoke WLST and access Administration Server To use WLST to administer the OIG domain, use a helper pod in the same Kubernetes cluster as the OIG Domain.\n Check to see if the helper pod exists by running:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; | grep helper For example:\n$ kubectl get pods -n oigns | grep helper The output should look similar to the following:\nhelper 1/1 Running 0 26h If the helper pod doesn\u0026rsquo;t exist then see Step 1 in Prepare your environment to create it.\n Run the following command to start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n oigns -- /bin/bash This will take you into a bash shell in the running helper pod:\n[oracle@helper ~]$ Connect to WLST using the following commands:\n[oracle@helper ~]$ cd $ORACLE_HOME/oracle_common/common/bin [oracle@helper ~]$ ./wlst.sh The output will look similar to the following:\nInitializing WebLogic Scripting Tool (WLST) ... Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away. Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; To access t3 for the Administration Server connect as follows:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3://governancedomain-adminserver:7001\u0026#39;) The output will look similar to the following:\nConnecting to t3://governancedomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;governancedomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. wls:/governancedomain/serverConfig/\u0026gt; Or to access t3 for the OIG Cluster service, connect as follows:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3://governancedomain-cluster-oim-cluster:14000\u0026#39;) The output will look similar to the following:\nConnecting to t3://governancedomain-cluster-oim-cluster:14000 with userid weblogic ... Successfully connected to managed Server \u0026quot;oim_server1\u0026quot; that belongs to domain \u0026quot;governancedomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. 
wls:/governancedomain/serverConfig/\u0026gt; Sample operations For a full list of WLST operations refer to WebLogic Server WLST Online and Offline Command Reference.\nDisplay servers wls:/governancedomain/serverConfig/\u0026gt; cd('/Servers') wls:/governancedomain/serverConfig/Servers\u0026gt; ls () dr-- AdminServer dr-- oim_server1 dr-- oim_server2 dr-- oim_server3 dr-- oim_server4 dr-- oim_server5 dr-- soa_server1 dr-- soa_server2 dr-- soa_server3 dr-- soa_server4 dr-- soa_server5 wls:/governancedomain/serverConfig/Servers\u0026gt; Performing WLST administration via SSL By default the SSL port is not enabled for the Administration Server or OIG Managed Servers. To configure the SSL port for the Administration Server and Managed Servers login to WebLogic Administration console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console and navigate to Lock \u0026amp; Edit -\u0026gt; Environment -\u0026gt;Servers -\u0026gt; server_name -\u0026gt;Configuration -\u0026gt; General -\u0026gt; SSL Listen Port Enabled -\u0026gt; Provide SSL Port ( For Administration Server: 7002 and for OIG Managed Server (oim_server1): 14101) - \u0026gt; Save -\u0026gt; Activate Changes.\nNote: If configuring the OIG Managed Servers for SSL you must enable SSL on the same port for all servers (oim_server1 through oim_server4)\n Create a myscripts directory as follows:\n$ cd $WORKDIR/kubernetes $ mkdir myscripts $ cd myscripts Create a sample yaml template file in the myscripts directory called \u0026lt;domain_uid\u0026gt;-adminserver-ssl.yaml to create a Kubernetes service for the Administration Server:\nNote: Update the domainName, domainUID and namespace based on your environment.\napiVersion: v1 kind: Service metadata: labels: serviceType: SERVER weblogic.domainName: governancedomain weblogic.domainUID: governancedomain weblogic.resourceVersion: domain-v2 weblogic.serverName: AdminServer name: governancedomain-adminserver-ssl namespace: oigns spec: clusterIP: None ports: - name: default port: 7002 protocol: TCP targetPort: 7002 selector: weblogic.createdByOperator: \u0026quot;true\u0026quot; weblogic.domainUID: governancedomain weblogic.serverName: AdminServer type: ClusterIP and create the following sample yaml template file \u0026lt;domain_uid\u0026gt;-oim-cluster-ssl.yaml for the OIG Managed Server:\napiVersion: v1 kind: Service metadata: labels: serviceType: SERVER weblogic.domainName: governancedomain weblogic.domainUID: governancedomain weblogic.resourceVersion: domain-v2 name: governancedomain-cluster-oim-cluster-ssl namespace: oigns spec: clusterIP: None ports: - name: default port: 14101 protocol: TCP targetPort: 14101 selector: weblogic.clusterName: oim_cluster weblogic.createdByOperator: \u0026quot;true\u0026quot; weblogic.domainUID: governancedomain type: ClusterIP Apply the template using the following command for the Administration Server:\n$ kubectl apply -f governancedomain-adminserver-ssl.yaml service/governancedomain-adminserver-ssl created or using the following command for the OIG Managed Server:\n$ kubectl apply -f governancedomain-oim-cluster-ssl.yaml service/governancedomain-cluster-oim-cluster-ssl created Validate that the Kubernetes Services to access SSL ports are created successfully:\n$ kubectl get svc -n \u0026lt;domain_namespace\u0026gt; |grep ssl For example:\n$ kubectl get svc -n oigns |grep ssl The output will look similar to the following:\ngovernancedomain-adminserver-ssl ClusterIP None \u0026lt;none\u0026gt; 7002/TCP 74s governancedomain-cluster-oim-cluster-ssl ClusterIP 
None \u0026lt;none\u0026gt; 14101/TCP 21s Connect to a bash shell of the helper pod:\n$ kubectl exec -it helper -n oigns -- /bin/bash In the bash shell run the following:\n[oracle@governancedomain-adminserver oracle]$ export WLST_PROPERTIES=\u0026#34;-Dweblogic.security.SSL.ignoreHostnameVerification=true -Dweblogic.security.TrustKeyStore=DemoTrust\u0026#34; [oracle@governancedomain-adminserver oracle]$ cd /u01/oracle/oracle_common/common/bin [oracle@governancedomain-adminserver oracle]$ ./wlst.sh Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; Connect to the Administration Server t3s service:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3s://governancedomain-adminserver-ssl:7002\u0026#39;) Connecting to t3s://governancedomain-adminserver-ssl:7002 with userid weblogic ... \u0026lt;Mar 10, 2022 4:51:43 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090905\u0026gt; \u0026lt;Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.\u0026gt; \u0026lt;Mar 10, 2022 4:51:43 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090906\u0026gt; \u0026lt;Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.\u0026gt; \u0026lt;Mar 10, 2022 4:51:43 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090909\u0026gt; \u0026lt;Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.\u0026gt; Successfully connected to Admin Server \u0026#34;AdminServer\u0026#34; that belongs to domain \u0026#34;governancedomain\u0026#34;. wls:/governancedomain/serverConfig/\u0026gt; To connect to the OIG Managed Server t3s service:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3s://governancedomain-cluster-oim-cluster-ssl:14101\u0026#39;) Connecting to t3s://governancedomain-cluster-oim-cluster-ssl:14101 with userid weblogic ... \u0026lt;Mar 10, 2022 4:53:06 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090905\u0026gt; \u0026lt;Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.\u0026gt; \u0026lt;Mar 10, 2022 4:53:06 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090906\u0026gt; \u0026lt;Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.\u0026gt; \u0026lt;Mar 10, 2022 4:53:06 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090909\u0026gt; \u0026lt;Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.\u0026gt; Successfully connected to managed Server \u0026#34;oim_server1\u0026#34; that belongs to domain \u0026#34;governancedomain\u0026#34;. 
wls:/governancedomain/serverConfig/\u0026gt; " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oid/prepare-your-environment/", + "title": "Prepare Your Environment", + "tags": [], + "description": "Prepare your environment", + "content": " Check the Kubernetes cluster is ready Obtain the OID container image Setup the code repository to deploy OID Check the Kubernetes cluster is ready Run the following command on the master node to check the cluster and worker nodes are running:\n$ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/worker-node1 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/worker-node2 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/master-node Ready control-plane,master 23h v1.20.10 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-slxdq 1/1 Running 1 67d pod/coredns-66bff467f8-v77qt 1/1 Running 1 67d pod/etcd-10.89.73.42 1/1 Running 1 67d pod/kube-apiserver-10.89.73.42 1/1 Running 1 67d pod/kube-controller-manager-10.89.73.42 1/1 Running 27 67d pod/kube-flannel-ds-amd64-r2m8r 1/1 Running 2 48d pod/kube-flannel-ds-amd64-rdhrf 1/1 Running 2 6d1h pod/kube-flannel-ds-amd64-vpcbj 1/1 Running 3 66d pod/kube-proxy-jtcxm 1/1 Running 1 67d pod/kube-proxy-swfmm 1/1 Running 1 66d pod/kube-proxy-w6x6t 1/1 Running 1 66d pod/kube-scheduler-10.89.73.42 1/1 Running 29 67d Obtain the OID container image The OID Kubernetes deployment requires access to an OID container image. The image can be obtained in the following ways:\n Prebuilt OID container image Build your own OID container image using WebLogic Image Tool Prebuilt OID container image The latest prebuilt OID container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Internet Directory 12.2.1.4.0 and the latest PSU.\nNote: Before using this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oid_cpu and accept the license agreement.\nAlternatively the same image can also be downloaded from My Oracle Support by referring to the document ID 2723908.1.\nYou can use this image in the following ways:\n Pull the container image from the Oracle Container Registry automatically during the OID Kubernetes deployment. Manually pull the container image from the Oracle Container Registry or My Oracle Support, and then upload it to your own container registry. Manually pull the container image from the Oracle Container Registry or My Oracle Support and manually stage it on the master node and each worker node. Build your own OID container image using WebLogic Image Tool You can build your own OID container image using the WebLogic Image Tool. This is recommended if you need to apply one off patches to a Prebuilt OID container image. For more information about building your own container image with WebLogic Image Tool, see Create or update an image.\nYou can use an image built with WebLogic Image Tool in the following ways:\n Manually upload them to your own container registry. Manually stage them on the master node and each worker node. Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.\nSetup the Code repository to deploy OID Oracle Internet Directory deployment on Kubernetes leverages deployment scripts provided by Oracle for creating Oracle Internet Directory containers using the Helm charts provided. 
To deploy Oracle Internet Directory on Kubernetes you should set up the deployment scripts on the master node as below:\n Create a working directory to setup the source code.\n$ mkdir \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/OIDContainer Download the latest OID deployment scripts from the OID repository:\n$ cd \u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/22.2.1 For example:\n$ cd /scratch/OIDContainer $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/22.2.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleInternetDirectory For example:\n$ export WORKDIR=/scratch/OIDContainer/fmw-kubernetes/OracleInternetDirectory You are now ready to create the OID deployment as per Create OID instances.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oud/prepare-your-environment/", + "title": "Prepare Your Environment", + "tags": [], + "description": "Prepare your environment", + "content": " Check the Kubernetes cluster is ready Obtain the OUD container image Create a persistent volume directory Setup the code repository to deploy OUD Check the Kubernetes cluster is ready As per the Prerequisites a Kubernetes cluster should have already been configured.\n Run the following command on the master node to check the cluster and worker nodes are running:\n$ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/worker-node1 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/worker-node2 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/master-node Ready control-plane,master 23h v1.20.10 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-slxdq 1/1 Running 1 67d pod/coredns-66bff467f8-v77qt 1/1 Running 1 67d pod/etcd-10.89.73.42 1/1 Running 1 67d pod/kube-apiserver-10.89.73.42 1/1 Running 1 67d pod/kube-controller-manager-10.89.73.42 1/1 Running 27 67d pod/kube-flannel-ds-amd64-r2m8r 1/1 Running 2 48d pod/kube-flannel-ds-amd64-rdhrf 1/1 Running 2 6d1h pod/kube-flannel-ds-amd64-vpcbj 1/1 Running 3 66d pod/kube-proxy-jtcxm 1/1 Running 1 67d pod/kube-proxy-swfmm 1/1 Running 1 66d pod/kube-proxy-w6x6t 1/1 Running 1 66d pod/kube-scheduler-10.89.73.42 1/1 Running 29 67d Obtain the OUD container image The OUD Kubernetes deployment requires access to an OUD container image. The image can be obtained in the following ways:\n Prebuilt OUD container image Build your own OUD container image using WebLogic Image Tool Prebuilt OUD container image The latest prebuilt OUD container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Unified Directory 12.2.1.4.0 and the latest PSU.\nNote: Before using this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oud_cpu and accept the license agreement.\nAlternatively the same image can also be downloaded from My Oracle Support by referring to the document ID 2723908.1.\nYou can use this image in the following ways:\n Pull the container image from the Oracle Container Registry automatically during the OUD Kubernetes deployment. Manually pull the container image from the Oracle Container Registry or My Oracle Support, and then upload it to your own container registry. Manually pull the container image from the Oracle Container Registry or My Oracle Support and manually stage it on the master node and each worker node. 
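If you choose the last option and stage the image manually on each node, the exact steps depend on your container runtime and the image tag you downloaded. The following is a minimal sketch only, assuming docker is the container runtime, the image is pulled from Oracle Container Registry, and \u0026lt;tag\u0026gt; and \u0026lt;worker-node\u0026gt; are illustrative placeholders:\n$ docker pull container-registry.oracle.com/middleware/oud_cpu:\u0026lt;tag\u0026gt; $ docker save container-registry.oracle.com/middleware/oud_cpu:\u0026lt;tag\u0026gt; -o oud_cpu.tar $ scp oud_cpu.tar \u0026lt;worker-node\u0026gt;:/tmp $ ssh \u0026lt;worker-node\u0026gt; docker load -i /tmp/oud_cpu.tar Repeat the docker load on the master node and each worker node. Refer to the Enterprise Deployment Guide for the supported procedure.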
Build your own OUD container image using WebLogic Image Tool You can build your own OUD container image using the WebLogic Image Tool. This is recommended if you need to apply one off patches to a Prebuilt OUD container image. For more information about building your own container image with WebLogic Image Tool, see Create or update image.\nYou can use an image built with WebLogic Image Tool in the following ways:\n Manually upload them to your own container registry. Manually stage them on the master node and each worker node. Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.\nCreate a persistent volume directory As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.\nMake sure the persistent volume path has full access permissions, and that the folder is empty. In this example /scratch/shared/ is a shared directory accessible from all nodes.\n On the master node run the following command to create a user_projects directory:\n$ cd \u0026lt;persistent_volume\u0026gt; $ mkdir oud_user_projects $ chmod 777 oud_user_projects For example:\n$ cd /scratch/shared $ mkdir oud_user_projects $ chmod 777 oud_user_projects On the master node run the following to ensure it is possible to read and write to the persistent volume:\n$ cd \u0026lt;persistent_volume\u0026gt;/oud_user_projects $ touch filemaster.txt $ ls filemaster.txt For example:\n$ cd /scratch/shared/oud_user_projects $ touch filemaster.txt $ ls filemaster.txt On the first worker node run the following to ensure it is possible to read and write to the persistent volume:\n$ cd /scratch/shared/oud_user_projects $ ls filemaster.txt $ touch fileworker1.txt $ ls fileworker1.txt Repeat the above for any other worker nodes e.g fileworker2.txt etc. Once proven that it\u0026rsquo;s possible to read and write from each node to the persistent volume, delete the files created.\n Setup the code repository to deploy OUD Oracle Unified Directory deployment on Kubernetes leverages deployment scripts provided by Oracle for creating Oracle Unified Directory containers using the Helm charts provided. 
To deploy Oracle Unified Directory on Kubernetes you should set up the deployment scripts on the persistent volume as below:\nNote: The work directory must be created on the persistent volume as access to the helm charts is required by a cron job created during OUD deployment.\n Create a working directory on the persistent volume to setup the source code.\n$ mkdir \u0026lt;persistent_volume\u0026gt;/\u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/shared/OUDContainer Download the latest OUD deployment scripts from the OUD repository:\n$ cd \u0026lt;persistent_volume\u0026gt;/\u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/22.2.1 For example:\n$ cd /scratch/shared/OUDContainer $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/22.2.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleUnifiedDirectory For example:\n$ export WORKDIR=/scratch/shared/OUDContainer/fmw-kubernetes/OracleUnifiedDirectory You are now ready to create the OUD deployment as per Create OUD instances.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oudsm/prepare-your-environment/", + "title": "Prepare Your Environment", + "tags": [], + "description": "Prepare your environment", + "content": " Check the Kubernetes cluster is ready Obtain the OUDSM container image Setup the code repository to deploy OUDSM Check the Kubernetes cluster is ready As per the Prerequisites a Kubernetes cluster should have already been configured.\n Run the following command on the master node to check the cluster and worker nodes are running:\n$ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/worker-node1 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/worker-node2 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/master-node Ready control-plane,master 23h v1.20.10 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-slxdq 1/1 Running 1 67d pod/coredns-66bff467f8-v77qt 1/1 Running 1 67d pod/etcd-10.89.73.42 1/1 Running 1 67d pod/kube-apiserver-10.89.73.42 1/1 Running 1 67d pod/kube-controller-manager-10.89.73.42 1/1 Running 27 67d pod/kube-flannel-ds-amd64-r2m8r 1/1 Running 2 48d pod/kube-flannel-ds-amd64-rdhrf 1/1 Running 2 6d1h pod/kube-flannel-ds-amd64-vpcbj 1/1 Running 3 66d pod/kube-proxy-jtcxm 1/1 Running 1 67d pod/kube-proxy-swfmm 1/1 Running 1 66d pod/kube-proxy-w6x6t 1/1 Running 1 66d pod/kube-scheduler-10.89.73.42 1/1 Running 29 67d Obtain the OUDSM container image The Oracle Unified Directory Services Manager (OUDSM) Kubernetes deployment requires access to an OUDSM container image. The image can be obtained in the following ways:\n Prebuilt OUDSM container image Build your own OUDSM container image using WebLogic Image Tool Prebuilt OUDSM container image The latest prebuilt OUDSM container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Unified Directory Services Manager 12.2.1.4.0 and the latest PSU.\nNote: Before using this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oudsm_cpu and accept the license agreement.\nAlternatively the same image can also be downloaded from My Oracle Support by referring to the document ID 2723908.1.\nYou can use this image in the following ways:\n Pull the container image from the Oracle Container Registry automatically during the OUDSM Kubernetes deployment. 
Manually pull the container image from the Oracle Container Registry or My Oracle Support, and then upload it to your own container registry. Manually pull the container image from the Oracle Container Registry or My Oracle Support and manually stage it on the master node and each worker node. Build your own OUDSM container image using WebLogic Image Tool You can build your own OUDSM container image using the WebLogic Image Tool. This is recommended if you need to apply one off patches to a Prebuilt OUDSM container image. For more information about building your own container image with WebLogic Image Tool, see Create or update image\nYou can use an image built with WebLogic Image Tool in the following ways:\n Manually upload them to your own container registry. Manually stage them on the master node and each worker node. Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.\nSetup the code repository to deploy OUDSM Oracle Unified Directory Services Manager deployment on Kubernetes leverages deployment scripts provided by Oracle for creating Oracle Unified Directory Services Manager containers using the Helm charts provided. To deploy Oracle Unified Directory Services Manager on Kubernetes you should set up the deployment scripts on the master node as below:\n Create a working directory to setup the source code.\n$ mkdir \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/OUDSMContainer Download the latest OUDSM deployment scripts from the OUDSM repository:\n$ cd \u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/22.2.1 For example:\n$ cd /scratch/OUDSMContainer $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/22.2.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleUnifiedDirectorySM For example:\n$ export WORKDIR=/scratch/OUDSMContainer/fmw-kubernetes/OracleUnifiedDirectorySM You are now ready to create the OUDSM deployment as per Create OUDSM instances.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/prepare-your-environment/", + "title": "Prepare your environment", + "tags": [], + "description": "Sample for creating an OAM domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OAM domain.", + "content": "To prepare for Oracle Access Management deployment in a Kubernetes environment, complete the following steps:\n Check the Kubernetes cluster is ready\n Obtain the OAM container image\n Set up the code repository to deploy OAM domains\n Install the WebLogic Kubernetes Operator\n Create a namespace for Oracle Access Management\n Create a Kubernetes secret for the container registry\n RCU schema creation\n Preparing the environment for domain creation\na. Creating Kubernetes secrets for the domain and RCU\nb. 
Create a Kubernetes persistent volume and persistent volume claim\n Check the Kubernetes cluster is ready As per the Prerequisites a Kubernetes cluster should have already been configured.\nCheck that all the nodes in the Kubernetes cluster are running.\n Run the following command on the master node to check the cluster and worker nodes are running:\n$ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/worker-node1 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/worker-node2 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/master-node Ready control-plane,master 23h v1.20.10 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h pod/coredns-66bff467f8-xtc8k 1/1 Running 0 23h pod/etcd-master 1/1 Running 0 21h pod/kube-apiserver-master-node 1/1 Running 0 21h pod/kube-controller-manager-master-node 1/1 Running 0 21h pod/kube-flannel-ds-amd64-lxsfw 1/1 Running 0 17h pod/kube-flannel-ds-amd64-pqrqr 1/1 Running 0 17h pod/kube-flannel-ds-amd64-wj5nh 1/1 Running 0 17h pod/kube-proxy-2kxv2 1/1 Running 0 17h pod/kube-proxy-82vvj 1/1 Running 0 17h pod/kube-proxy-nrgw9 1/1 Running 0 23h pod/kube-scheduler-master 1/1 Running 0 21 Obtain the OAM container image The OAM Kubernetes deployment requires access to an OAM container image. The image can be obtained in the following ways:\n Prebuilt OAM container image Build your own OAM container image using WebLogic Image Tool Prebuilt OAM container image The latest prebuilt OAM container image can be downloaded from Oracle Container Registry. This image is prebuilt by Oracle and includes Oracle Access Management 12.2.1.4.0 and the latest PSU.\nNote: Before using this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oam_cpu and accept the license agreement.\nAlternatively the same image can also be downloaded from My Oracle Support by referring to the document ID 2723908.1.\nYou can use this image in the following ways:\n Pull the container image from the Oracle Container Registry automatically during the OAM Kubernetes deployment. Manually pull the container image from the Oracle Container Registry or My Oracle Support, and then upload it to your own container registry. Manually pull the container image from the Oracle Container Registry or My Oracle Support and manually stage it on the master node and each worker node. Build your own OAM container image using WebLogic Image Tool You can build your own OAM container image using the WebLogic Image Tool. This is recommended if you need to apply one off patches to a Prebuilt OAM container image. For more information about building your own container image with WebLogic Image Tool, see Create or update image.\nYou can use an image built with WebLogic Image Tool in the following ways:\n Manually upload them to your own container registry. Manually stage them on the master node and each worker node. Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.\nSet up the code repository to deploy OAM domains OAM domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. 
For deploying the OAM domains, you need to set up the deployment scripts on the master node as below:\n Create a working directory to setup the source code.\n$ mkdir \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/OAMK8S Download the latest OAM deployment scripts from the OAM repository.\n$ cd \u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/22.2.1 For example:\n$ cd /scratch/OAMK8S $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/22.2.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleAccessManagement For example:\n$ export WORKDIR=/scratch/OAMK8S/fmw-kubernetes/OracleAccessManagement Run the following command and see if the WebLogic custom resource definition name already exists:\n$ kubectl get crd In the output you should see:\nNo resources found If you see the following:\nNAME AGE domains.weblogic.oracle 5d then run the following command to delete the existing crd:\n$ kubectl delete crd domains.weblogic.oracle customresourcedefinition.apiextensions.k8s.io \u0026#34;domains.weblogic.oracle\u0026#34; deleted Install the WebLogic Kubernetes Operator On the master node run the following command to create a namespace for the operator:\n$ kubectl create namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl create namespace opns The output will look similar to the following:\nnamespace/opns created Create a service account for the operator in the operator\u0026rsquo;s namespace by running the following command:\n$ kubectl create serviceaccount -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; \u0026lt;sample-kubernetes-operator-sa\u0026gt; For example:\n$ kubectl create serviceaccount -n opns op-sa The output will look similar to the following:\nserviceaccount/op-sa created Run the following helm command to install and start the operator:\n$ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \\ --namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; \\ --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 \\ --set serviceAccount=\u0026lt;sample-kubernetes-operator-sa\u0026gt; \\ --set \u0026#34;enableClusterRoleBinding=true\u0026#34; \\ --set \u0026#34;domainNamespaceSelectionStrategy=LabelSelector\u0026#34; \\ --set \u0026#34;domainNamespaceLabelSelector=weblogic-operator\\=enabled\u0026#34; \\ --set \u0026#34;javaLoggingLevel=FINE\u0026#34; --wait For example:\n$ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \\ --namespace opns \\ --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 \\ --set serviceAccount=op-sa \\ --set \u0026#34;enableClusterRoleBinding=true\u0026#34; \\ --set \u0026#34;domainNamespaceSelectionStrategy=LabelSelector\u0026#34; \\ --set \u0026#34;domainNamespaceLabelSelector=weblogic-operator\\=enabled\u0026#34; \\ --set \u0026#34;javaLoggingLevel=FINE\u0026#34; --wait The output will look similar to the following:\nNAME: weblogic-kubernetes-operator LAST DEPLOYED: Mon Mar 06 10:25:39 NAMESPACE: opns STATUS: deployed REVISION: 1 TEST SUITE: None Verify that the operator\u0026rsquo;s pod and services are running by executing the following command:\n$ kubectl get all -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl get all -n opns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/weblogic-operator-676d5cc6f4-wct7b 2/2 Running 0 40s NAME TYPE CLUSTER-IP 
EXTERNAL-IP PORT(S) AGE service/internal-weblogic-operator-svc ClusterIP 10.101.1.198 \u0026lt;none\u0026gt; 8082/TCP 40s NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/weblogic-operator 1/1 1 1 40s NAME DESIRED CURRENT READY AGE replicaset.apps/weblogic-operator-676d5cc6f4 1 1 1 40s Verify the operator pod\u0026rsquo;s log:\n$ kubectl logs -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; -c weblogic-operator deployments/weblogic-operator For example:\n$ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator The output will look similar to the following:\n... {\u0026quot;timestamp\u0026quot;:\u0026quot;2022-03-06T10:26:10.917829423Z\u0026quot;,\u0026quot;thread\u0026quot;:13,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1635762370917,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;2022-03-06T10:26:20.920145876Z\u0026quot;,\u0026quot;thread\u0026quot;:13,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1635762380920,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;2022-03-06T10:26:30.922360564Z\u0026quot;,\u0026quot;thread\u0026quot;:19,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1635762390922,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} 
{\u0026quot;timestamp\u0026quot;:\u0026quot;2022-03-06T10:26:40.924847211Z\u0026quot;,\u0026quot;thread\u0026quot;:29,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1635762400924,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} Create a namespace for Oracle Access Management Run the following command to create a namespace for the domain:\n$ kubectl create namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create namespace oamns The output will look similar to the following:\nnamespace/oamns created Run the following command to tag the namespace so the WebLogic Kubernetes Operator can manage it:\n$ kubectl label namespaces \u0026lt;domain_namespace\u0026gt; weblogic-operator=enabled For example:\n$ kubectl label namespaces oamns weblogic-operator=enabled The output will look similar to the following:\nnamespace/oamns labeled Run the following command to check the label was created:\n$ kubectl describe namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe namespace oamns The output will look similar to the following:\nName: oamns Labels: weblogic-operator=enabled Annotations: \u0026lt;none\u0026gt; Status: Active No resource quota. No LimitRange resource. Create a Kubernetes secret for the container registry In this section you create a secret that stores the credentials for the container registry where the OAM image is stored. This step must be followed if using Oracle Container Registry or your own private registry. If you are not using a container registry and have loaded the images on each of the master and worker nodes, you can skip this step.\n Run the following command to create the secret:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=\u0026lt;CONTAINER_REGISTRY\u0026gt; \\ --docker-username=\u0026#34;\u0026lt;USER_NAME\u0026gt;\u0026#34; \\ --docker-password=\u0026lt;PASSWORD\u0026gt; --docker-email=\u0026lt;EMAIL_ID\u0026gt; \\ --namespace=\u0026lt;domain_namespace\u0026gt; For example, if using Oracle Container Registry:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=container-registry.oracle.com \\ --docker-username=\u0026#34;user@example.com\u0026#34; \\ --docker-password=password --docker-email=user@example.com \\ --namespace=oamns Replace \u0026lt;USER_NAME\u0026gt; and \u0026lt;PASSWORD\u0026gt; with the credentials for the registry with the following caveats:\n If using Oracle Container Registry to pull the OAM container image, this is the username and password used to login to Oracle Container Registry. 
Before you can use this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oam_cpu and accept the license agreement.\n If using your own container registry to store the OAM container image, this is the username and password (or token) for your container registry.\n The output will look similar to the following:\nsecret/orclcred created RCU schema creation In this section you create the RCU schemas in the Oracle Database.\nBefore following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool.\n If using Oracle Container Registry or your own container registry for your OAM container image, run the following command to create a helper pod to run RCU:\n$ kubectl run --image=\u0026lt;image_name-from-registry\u0026gt;:\u0026lt;tag\u0026gt; --image-pull-policy=\u0026#34;IfNotPresent\u0026#34; --overrides=\u0026#39;{\u0026#34;apiVersion\u0026#34;: \u0026#34;v1\u0026#34;, \u0026#34;spec\u0026#34;:{\u0026#34;imagePullSecrets\u0026#34;: [{\u0026#34;name\u0026#34;: \u0026#34;orclcred\u0026#34;}]}}\u0026#39; helper -n \u0026lt;domain_namespace\u0026gt; -- sleep infinity For example:\n$ kubectl run --image=container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-220119.2059 --image-pull-policy=\u0026#34;IfNotPresent\u0026#34; --overrides=\u0026#39;{\u0026#34;apiVersion\u0026#34;: \u0026#34;v1\u0026#34;,\u0026#34;spec\u0026#34;:{\u0026#34;imagePullSecrets\u0026#34;: [{\u0026#34;name\u0026#34;: \u0026#34;orclcred\u0026#34;}]}}\u0026#39; helper -n oamns -- sleep infinity If you are not using a container registry and have loaded the image on each of the master and worker nodes, run the following command:\n$ kubectl run helper --image \u0026lt;image\u0026gt;:\u0026lt;tag\u0026gt; -n oamns -- sleep infinity For example:\n$ kubectl run helper --image oracle/oam:12.2.1.4-jdk8-ol7-220119.2059 -n oamns -- sleep infinity The output will look similar to the following:\npod/helper created Run the following command to check the pod is running:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE helper 1/1 Running 0 3m Note: If you are pulling the image from a container registry it may take several minutes before the pod has a STATUS of 1/1. 
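If you prefer to watch the pod start rather than re-running the command, a simple alternative (assuming the oamns namespace used in this example) is:\n$ kubectl get pods -n oamns -w Press Ctrl+C to stop watching once the pod shows READY 1/1 and STATUS Running.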
While the pod is starting you can check the status of the pod, by running the following command:\n$ kubectl describe pod helper -n oamns Run the following command to start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n oamns -- /bin/bash This will take you into a bash shell in the running helper pod:\n[oracle@helper ~]$ In the helper bash shell run the following commands to set the environment:\n[oracle@helper ~]$ export CONNECTION_STRING=\u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt; [oracle@helper ~]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; [oracle@helper ~]$ echo -e \u0026lt;db_pwd\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;rcu_schema_pwd\u0026gt; \u0026gt; /tmp/pwd.txt [oracle@helper ~]$ cat /tmp/pwd.txt where:\n\u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt;\tis your database connect string\n\u0026lt;rcu_schema_prefix\u0026gt; is the RCU schema prefix you want to set\n\u0026lt;db_pwd\u0026gt; is the SYS password for the database\n\u0026lt;rcu_schema_pwd\u0026gt; is the password you want to set for the \u0026lt;rcu_schema_prefix\u0026gt;\nFor example:\n[oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com [oracle@helper ~]$ export RCUPREFIX=OAMK8S [oracle@helper ~]$ echo -e \u0026lt;password\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;password\u0026gt; \u0026gt; /tmp/pwd.txt [oracle@helper ~]$ cat /tmp/pwd.txt \u0026lt;password\u0026gt; \u0026lt;password\u0026gt; In the helper bash shell run the following command to create the RCU schemas in the database:\n$ [oracle@helper ~]$ /u01/oracle/oracle_common/bin/rcu -silent -createRepository -databaseType ORACLE -connectString \\ $CONNECTION_STRING -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \\ -selectDependentsForComponents true -schemaPrefix $RCUPREFIX -component MDS -component IAU \\ -component IAU_APPEND -component IAU_VIEWER -component OPSS -component WLS -component STB -component OAM -f \u0026lt; /tmp/pwd.txt The output will look similar to the following:\nRCU Logfile: /tmp/RCU2022-03-06_10-29_561898106/logs/rcu.log Processing command line .... Repository Creation Utility - Checking Prerequisites Checking Global Prerequisites Repository Creation Utility - Checking Prerequisites Checking Component Prerequisites Repository Creation Utility - Creating Tablespaces Validating and Creating Tablespaces Create tablespaces in the repository database Repository Creation Utility - Create Repository Create in progress. 
Executing pre create operations Percent Complete: 18 Percent Complete: 18 Percent Complete: 19 Percent Complete: 20 Percent Complete: 21 Percent Complete: 21 Percent Complete: 22 Percent Complete: 22 Creating Common Infrastructure Services(STB) Percent Complete: 30 Percent Complete: 30 Percent Complete: 39 Percent Complete: 39 Percent Complete: 39 Creating Audit Services Append(IAU_APPEND) Percent Complete: 46 Percent Complete: 46 Percent Complete: 55 Percent Complete: 55 Percent Complete: 55 Creating Audit Services Viewer(IAU_VIEWER) Percent Complete: 62 Percent Complete: 62 Percent Complete: 63 Percent Complete: 63 Percent Complete: 64 Percent Complete: 64 Creating Metadata Services(MDS) Percent Complete: 73 Percent Complete: 73 Percent Complete: 73 Percent Complete: 74 Percent Complete: 74 Percent Complete: 75 Percent Complete: 75 Percent Complete: 75 Creating Weblogic Services(WLS) Percent Complete: 80 Percent Complete: 80 Percent Complete: 83 Percent Complete: 83 Percent Complete: 91 Percent Complete: 98 Percent Complete: 98 Creating Audit Services(IAU) Percent Complete: 100 Creating Oracle Platform Security Services(OPSS) Creating Oracle Access Manager(OAM) Executing post create operations Repository Creation Utility: Create - Completion Summary Database details: ----------------------------- Host Name : mydatabasehost.example.com Port : 1521 Service Name : ORCL.EXAMPLE.COM Connected As : sys Prefix for (prefixable) Schema Owners : OAMK8S RCU Logfile : /tmp/RCU2022-03-06_10-29_561898106/logs/rcu.log Component schemas created: ----------------------------- Component Status Logfile Common Infrastructure Services Success /tmp/RCU2022-03-06_10-29_561898106/logs/stb.log Oracle Platform Security Services Success /tmp/RCU2022-03-06_10-29_561898106/logs/opss.log Oracle Access Manager Success /tmp/RCU2022-03-06_10-29_561898106/logs/oam.log Audit Services Success /tmp/RCU2022-03-06_10-29_561898106/logs/iau.log Audit Services Append Success /tmp/RCU2022-03-06_10-29_561898106/logs/iau_append.log Audit Services Viewer Success /tmp/RCU2022-03-06_10-29_561898106/logs/iau_viewer.log Metadata Services Success /tmp/RCU2022-03-06_10-29_561898106/logs/mds.log WebLogic Services Success /tmp/RCU2022-03-06_10-29_561898106/logs/wls.log Repository Creation Utility - Create : Operation Completed [oracle@helper ~]$ Exit the helper bash shell by issuing the command exit.\n Preparing the environment for domain creation In this section you prepare the environment for the OAM domain creation. This involves the following steps:\na. Creating Kubernetes secrets for the domain and RCU\nb. Create a Kubernetes persistent volume and persistent volume claim\nCreating Kubernetes secrets for the domain and RCU Create a Kubernetes secret for the domain using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p \u0026lt;pwd\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -d \u0026lt;domain_uid\u0026gt; -s \u0026lt;kubernetes_domain_secret\u0026gt; where:\n-u weblogic is the WebLogic username\n-p \u0026lt;pwd\u0026gt; is the password for the weblogic user\n-n \u0026lt;domain_namespace\u0026gt; is the domain namespace\n-d \u0026lt;domain_uid\u0026gt; is the domain UID to be created. The default is domain1 if not specified\n-s \u0026lt;kubernetes_domain_secret\u0026gt; is the name you want to create for the secret for this namespace. 
The default is to use the domainUID if not specified\nFor example:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p \u0026lt;password\u0026gt; -n oamns -d accessdomain -s accessdomain-credentials The output will look similar to the following:\nsecret/accessdomain-credentials created secret/accessdomain-credentials labeled The secret accessdomain-credentials has been successfully created in the oamns namespace. Verify the secret is created using the following command:\n$ kubectl get secret \u0026lt;kubernetes_domain_secret\u0026gt; -o yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get secret accessdomain-credentials -o yaml -n oamns The output will look similar to the following:\napiVersion: v1 data: password: V2VsY29tZTE= username: d2VibG9naWM= kind: Secret metadata: creationTimestamp: \u0026quot;2022-03-06T10:41:11Z\u0026quot; labels: weblogic.domainName: accessdomain weblogic.domainUID: accessdomain name: accessdomain-credentials namespace: oamns resourceVersion: \u0026quot;2913144\u0026quot; uid: 5f8d9874-9cd7-42be-af4b-54f787e71ac2 type: Opaque Create a Kubernetes secret for RCU using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:\n$ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u \u0026lt;rcu_prefix\u0026gt; -p \u0026lt;rcu_schema_pwd\u0026gt; -a sys -q \u0026lt;sys_db_pwd\u0026gt; -d \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -s \u0026lt;kubernetes_rcu_secret\u0026gt; where:\n-u \u0026lt;rcu_prefix\u0026gt; is the name of the RCU schema prefix created previously\n-p \u0026lt;rcu_schema_pwd\u0026gt; is the password for the RCU schema prefix\n-q \u0026lt;sys_db_pwd\u0026gt; is the sys database password\n-d \u0026lt;domain_uid\u0026gt; is the domain_uid that you created earlier\n-n \u0026lt;domain_namespace\u0026gt; is the domain namespace\n-s \u0026lt;kubernetes_rcu_secret\u0026gt; is the name of the rcu secret to create\nFor example:\n$ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u OAMK8S -p \u0026lt;password\u0026gt; -a sys -q \u0026lt;password\u0026gt; -d accessdomain -n oamns -s accessdomain-rcu-credentials The output will look similar to the following:\nsecret/accessdomain-rcu-credentials created secret/accessdomain-rcu-credentials labeled The secret accessdomain-rcu-credentials has been successfully created in the oamns namespace. Verify the secret is created using the following command:\n$ kubectl get secret \u0026lt;kubernetes_rcu_secret\u0026gt; -o yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get secret accessdomain-rcu-credentials -o yaml -n oamns The output will look similar to the following:\napiVersion: v1 data: password: T3JhY2xlXzEyMw== sys_password: T3JhY2xlXzEyMw== sys_username: c3lz username: T0FNSzhT kind: Secret metadata: creationTimestamp: \u0026quot;2022-03-06T10:50:34Z\u0026quot; labels: weblogic.domainName: accessdomain weblogic.domainUID: accessdomain name: accessdomain-rcu-credentials namespace: oamns resourceVersion: \u0026quot;2913938\u0026quot; uid: 3798af1b-2783-415f-aea8-31e0610220a7 type: Opaque Create a Kubernetes persistent volume and persistent volume claim As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.\nA persistent volume is the same as a disk mount but is inside a container. 
A Kubernetes persistent volume is an arbitrary name (determined in this case, by Oracle) that is mapped to a physical volume on a disk.\nWhen a container is started, it needs to mount that volume. The physical volume should be on a shared disk accessible by all the Kubernetes worker nodes because it is not known on which worker node the container will be started. In the case of Identity and Access Management, the persistent volume does not get erased when a container stops. This enables persistent configurations.\nThe example below uses an NFS mounted volume (\u0026lt;persistent_volume\u0026gt;/accessdomainpv). Other volume types can also be used. See the official Kubernetes documentation for Volumes.\nNote: The persistent volume directory needs to be accessible to both the master and worker node(s). Make sure this path has full access permissions, and that the folder is empty. In this example /scratch/shared/accessdomainpv is accessible from all nodes via NFS.\nTo create a Kubernetes persistent volume, perform the following steps:\n Make a backup copy of the create-pv-pvc-inputs.yaml file and create required directories:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p \u0026lt;persistent_volume\u0026gt;/accessdomainpv $ chmod -R 777 \u0026lt;persistent_volume\u0026gt;/accessdomainpv For example:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p /scratch/shared/accessdomainpv $ chmod -R 777 /scratch/shared/accessdomainpv On the master node run the following command to ensure it is possible to read and write to the persistent volume:\ncd \u0026lt;persistent_volume\u0026gt;/accessdomainpv touch filemaster.txt ls filemaster.txt For example:\ncd /scratch/shared/accessdomainpv touch filemaster.txt ls filemaster.txt On the first worker node run the following to ensure it is possible to read and write to the persistent volume:\ncd /scratch/shared/accessdomainpv ls filemaster.txt touch fileworker1.txt ls fileworker1.txt Repeat the above for any other worker nodes e.g fileworker2.txt etc. Once proven that it\u0026rsquo;s possible to read and write from each node to the persistent volume, delete the files created.\n Navigate to $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc and edit the create-pv-pvc-inputs.yaml file and update the following parameters to reflect your settings. Save the file when complete:\nbaseName: \u0026lt;domain\u0026gt; domainUID: \u0026lt;domain_uid\u0026gt; namespace: \u0026lt;domain_namespace\u0026gt; weblogicDomainStorageType: NFS weblogicDomainStorageNFSServer: \u0026lt;nfs_server\u0026gt; weblogicDomainStoragePath: \u0026lt;physical_path_of_persistent_storage\u0026gt; weblogicDomainStorageSize: 10Gi For example:\n\t# The base name of the pv and pvc baseName: domain # Unique ID identifying a domain. # If left empty, the generated pv can be shared by multiple domains # This ID must not contain an underscope (\u0026quot;_\u0026quot;), and must be lowercase and unique across all domains in a Kubernetes cluster. domainUID: accessdomain # Name of the namespace for the persistent volume claim namespace: oamns ... # Persistent volume type for the persistent storage. # The value must be 'HOST_PATH' or 'NFS'. # If using 'NFS', weblogicDomainStorageNFSServer must be specified. 
weblogicDomainStorageType: NFS # The server name or ip address of the NFS server to use for the persistent storage. # The following line must be uncomment and customized if weblogicDomainStorateType is NFS: weblogicDomainStorageNFSServer: mynfsserver # Physical path of the persistent storage. # When weblogicDomainStorageType is set to HOST_PATH, this value should be set the to path to the # domain storage on the Kubernetes host. # When weblogicDomainStorageType is set to NFS, then weblogicDomainStorageNFSServer should be set # to the IP address or name of the DNS server, and this value should be set to the exported path # on that server. # Note that the path where the domain is mounted in the WebLogic containers is not affected by this # setting, that is determined when you create your domain. # The following line must be uncomment and customized: weblogicDomainStoragePath: /scratch/shared/accessdomainpv # Reclaim policy of the persistent storage # The valid values are: 'Retain', 'Delete', and 'Recycle' weblogicDomainStorageReclaimPolicy: Retain # Total storage allocated to the persistent storage. weblogicDomainStorageSize: 10Gi Execute the create-pv-pvc.sh script to create the PV and PVC configuration files:\n$ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output The output will be similar to the following:\nInput parameters being used export version=\u0026quot;create-weblogic-sample-domain-pv-pvc-inputs-v1\u0026quot; export baseName=\u0026quot;domain\u0026quot; export domainUID=\u0026quot;accessdomain\u0026quot; export namespace=\u0026quot;oamns\u0026quot; export weblogicDomainStorageType=\u0026quot;NFS\u0026quot; export weblogicDomainStorageNFSServer=\u0026quot;mynfsserver\u0026quot; export weblogicDomainStoragePath=\u0026quot;/scratch/shared/accessdomainpv\u0026quot; export weblogicDomainStorageReclaimPolicy=\u0026quot;Retain\u0026quot; export weblogicDomainStorageSize=\u0026quot;10Gi\u0026quot; Generating output/pv-pvcs/accessdomain-domain-pv.yaml Generating output/pv-pvcs/accessdomain-domain-pvc.yaml The following files were generated: output/pv-pvcs/accessdomain-domain-pv.yaml.yaml output/pv-pvcs/accessdomain-domain-pvc.yaml Run the following to show the files are created:\n$ ls output/pv-pvcs accessdomain-domain-pv.yaml accessdomain-domain-pvc.yaml create-pv-pvc-inputs.yaml Run the following kubectl command to create the PV and PVC in the domain namespace:\n$ kubectl create -f output/pv-pvcs/accessdomain-domain-pv.yaml -n \u0026lt;domain_namespace\u0026gt; $ kubectl create -f output/pv-pvcs/accessdomain-domain-pvc.yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create -f output/pv-pvcs/accessdomain-domain-pv.yaml -n oamns $ kubectl create -f output/pv-pvcs/accessdomain-domain-pvc.yaml -n oamns The output will look similar to the following:\npersistentvolume/accessdomain-domain-pv created persistentvolumeclaim/accessdomain-domain-pvc created Run the following commands to verify the PV and PVC were created successfully:\n$ kubectl describe pv \u0026lt;pv_name\u0026gt; $ kubectl describe pvc \u0026lt;pvc_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe pv accessdomain-domain-pv $ kubectl describe pvc accessdomain-domain-pvc -n oamns The output will look similar to the following:\n$ kubectl describe pv accessdomain-domain-pv Name: accessdomain-domain-pv Labels: weblogic.domainUID=accessdomain Annotations: pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pv-protection] StorageClass: 
accessdomain-domain-storage-class Status: Bound Claim: oamns/accessdomain-domain-pvc Reclaim Policy: Retain Access Modes: RWX VolumeMode: Filesystem Capacity: 10Gi Node Affinity: \u0026lt;none\u0026gt; Message: Source: Type: NFS (an NFS mount that lasts the lifetime of a pod) Server: mynfsserver Path: /scratch/shared/accessdomainpv ReadOnly: false Events: \u0026lt;none\u0026gt; $ kubectl describe pvc accessdomain-domain-pvc -n oamns Name: accessdomain-domain-pvc Namespace: oamns StorageClass: accessdomain-domain-storage-class Status: Bound Volume: accessdomain-domain-pv Labels: weblogic.domainUID=accessdomain Annotations: pv.kubernetes.io/bind-completed: yes pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pvc-protection] Capacity: 10Gi Access Modes: RWX VolumeMode: Filesystem Events: \u0026lt;none\u0026gt; Mounted By: \u0026lt;none\u0026gt; You are now ready to create the OAM domain as per Create OAM Domains\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/prepare-your-environment/", + "title": "Prepare your environment", + "tags": [], + "description": "Preparation to deploy OIG on Kubernetes", + "content": "To prepare for Oracle Identity Governance deployment in a Kubernetes environment, complete the following steps:\n Check the Kubernetes cluster is ready\n Obtain the OIG container image\n Setup the code repository to deploy OIG domains\n Install the WebLogic Kubernetes Operator\n Create a namespace for Oracle Identity Governance\n Create a Kubernetes secret for the container registry\n RCU schema creation\n Preparing the environment for domain creation\na. Creating Kubernetes secrets for the domain and RCU\nb. Create a Kubernetes persistent volume and persistent volume claim\n Check the Kubernetes cluster is ready As per the Prerequisites a Kubernetes cluster should have already been configured.\n Run the following command on the master node to check the cluster and worker nodes are running:\n$ kubectl get nodes,pods -n kube-system The output will look similar to the following:\nNAME STATUS ROLES AGE VERSION node/worker-node1 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/worker-node2 Ready \u0026lt;none\u0026gt; 17h v1.20.10 node/master-node Ready master 23h v1.20.10 NAME READY STATUS RESTARTS AGE pod/coredns-66bff467f8-fnhbq 1/1 Running 0 23h pod/coredns-66bff467f8-xtc8k 1/1 Running 0 23h pod/etcd-master 1/1 Running 0 21h pod/kube-apiserver-master-node 1/1 Running 0 21h pod/kube-controller-manager-master-node 1/1 Running 0 21h pod/kube-flannel-ds-amd64-lxsfw 1/1 Running 0 17h pod/kube-flannel-ds-amd64-pqrqr 1/1 Running 0 17h pod/kube-flannel-ds-amd64-wj5nh 1/1 Running 0 17h pod/kube-proxy-2kxv2 1/1 Running 0 17h pod/kube-proxy-82vvj 1/1 Running 0 17h pod/kube-proxy-nrgw9 1/1 Running 0 23h pod/kube-scheduler-master 1/1 Running 0 21$ Obtain the OIG container image The OIG Kubernetes deployment requires access to an OIG container image. The image can be obtained in the following ways:\n Prebuilt OIG container image Build your own OIG container image using WebLogic Image Tool Prebuilt OIG container image The latest prebuilt OIG container image can be downloaded from Oracle Container Registry. 
This image is prebuilt by Oracle and includes Oracle Identity Governance 12.2.1.4.0 and the latest PSU.\nNote: Before using this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oig_cpu and accept the license agreement.\nAlternatively the same image can also be downloaded from My Oracle Support by referring to the document ID 2723908.1.\nYou can use this image in the following ways:\n Pull the container image from the Oracle Container Registry automatically during the OIG Kubernetes deployment. Manually pull the container image from the Oracle Container Registry or My Oracle Support, and then upload it to your own container registry. Manually pull the container image from the Oracle Container Registry or My Oracle Support and manually stage it on the master node and each worker node. Build your own OIG container image using WebLogic Image Tool You can build your own OIG container image using the WebLogic Image Tool. This is recommended if you need to apply one off patches to a Prebuilt OIG container image. For more information about building your own container image with WebLogic Image Tool, see Create or update image.\nYou can use an image built with WebLogic Image Tool in the following ways:\n Manually upload them to your own container registry. Manually stage them on the master node and each worker node. Note: This documentation does not tell you how to pull or push the above images into a private container registry, or stage them on the master and worker nodes. Details of this can be found in the Enterprise Deployment Guide.\nSetup the code repository to deploy OIG domains Oracle Identity Governance domain deployment on Kubernetes leverages the WebLogic Kubernetes Operator infrastructure. For deploying the OIG domains, you need to set up the deployment scripts on the master node as below:\n Create a working directory to setup the source code.\n$ mkdir \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/OIGK8S Download the latest OIG deployment scripts from the OIG repository.\n$ cd \u0026lt;workdir\u0026gt; $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/22.2.1 For example:\n$ cd /scratch/OIGK8S $ git clone https://github.com/oracle/fmw-kubernetes.git --branch release/22.2.1 Set the $WORKDIR environment variable as follows:\n$ export WORKDIR=\u0026lt;workdir\u0026gt;/fmw-kubernetes/OracleIdentityGovernance For example:\n$ export WORKDIR=/scratch/OIGK8S/fmw-kubernetes/OracleIdentityGovernance Run the following command and see if the WebLogic custom resource definition name already exists:\n$ kubectl get crd In the output you should see:\nNo resources found in default namespace. 
If you see the following:\nNAME AGE domains.weblogic.oracle 5d then run the following command to delete the existing crd:\n$ kubectl delete crd domains.weblogic.oracle customresourcedefinition.apiextensions.k8s.io \u0026#34;domains.weblogic.oracle\u0026#34; deleted Install the WebLogic Kubernetes Operator On the master node run the following command to create a namespace for the operator:\n$ kubectl create namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl create namespace opns The output will look similar to the following:\nnamespace/opns created Create a service account for the operator in the operator\u0026rsquo;s namespace by running the following command:\n$ kubectl create serviceaccount -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; \u0026lt;sample-kubernetes-operator-sa\u0026gt; For example:\n$ kubectl create serviceaccount -n opns op-sa The output will look similar to the following:\nserviceaccount/op-sa created Run the following helm command to install and start the operator:\n$ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \\ --namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; \\ --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 \\ --set serviceAccount=\u0026lt;sample-kubernetes-operator-sa\u0026gt; \\ --set \u0026#34;enableClusterRoleBinding=true\u0026#34; \\ --set \u0026#34;domainNamespaceSelectionStrategy=LabelSelector\u0026#34; \\ --set \u0026#34;domainNamespaceLabelSelector=weblogic-operator\\=enabled\u0026#34; \\ --set \u0026#34;javaLoggingLevel=FINE\u0026#34; --wait For example:\n$ cd $WORKDIR $ helm install weblogic-kubernetes-operator kubernetes/charts/weblogic-operator \\ --namespace opns \\ --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 \\ --set serviceAccount=op-sa \\ --set \u0026#34;enableClusterRoleBinding=true\u0026#34; \\ --set \u0026#34;domainNamespaceSelectionStrategy=LabelSelector\u0026#34; \\ --set \u0026#34;domainNamespaceLabelSelector=weblogic-operator\\=enabled\u0026#34; \\ --set \u0026#34;javaLoggingLevel=FINE\u0026#34; --wait The output will look similar to the following:\nNAME: weblogic-kubernetes-operator LAST DEPLOYED: Wed Mar 9 11:51:37 2022 NAMESPACE: opns STATUS: deployed REVISION: 1 TEST SUITE: None Verify that the operator\u0026rsquo;s pod and services are running by executing the following command:\n$ kubectl get all -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl get all -n opns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/weblogic-operator-676d5cc6f4-rwzxf 2/2 Running 0 59s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/internal-weblogic-operator-svc ClusterIP 10.102.7.232 \u0026lt;none\u0026gt; 8082/TCP 59s NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/weblogic-operator 1/1 1 1 59s NAME DESIRED CURRENT READY AGE replicaset.apps/weblogic-operator-676d5cc6f4 1 1 1 59s Verify the operator pod\u0026rsquo;s log:\n$ kubectl logs -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; -c weblogic-operator deployments/weblogic-operator For example:\n$ kubectl logs -n opns -c weblogic-operator deployments/weblogic-operator The output will look similar to the 
following:\n{\u0026quot;timestamp\u0026quot;:\u0026quot;2022-03-09T11:52:53.167756673Z\u0026quot;,\u0026quot;thread\u0026quot;:23,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1636650293167,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;2022-03-09T11:53:03.170083172Z\u0026quot;,\u0026quot;thread\u0026quot;:30,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1636650303170,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} {\u0026quot;timestamp\u0026quot;:\u0026quot;2022-03-09T11:52:13.172302644Z\u0026quot;,\u0026quot;thread\u0026quot;:29,\u0026quot;fiber\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;namespace\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;domainUID\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;level\u0026quot;:\u0026quot;CONFIG\u0026quot;,\u0026quot;class\u0026quot;:\u0026quot;oracle.kubernetes.operator.TuningParametersImpl\u0026quot;,\u0026quot;method\u0026quot;:\u0026quot;update\u0026quot;,\u0026quot;timeInMillis\u0026quot;:1636650313172,\u0026quot;message\u0026quot;:\u0026quot;Reloading tuning parameters from Operator's config map\u0026quot;,\u0026quot;exception\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;code\u0026quot;:\u0026quot;\u0026quot;,\u0026quot;headers\u0026quot;:{},\u0026quot;body\u0026quot;:\u0026quot;\u0026quot;} Create a namespace for Oracle Identity Governance Run the following command to create a namespace for the domain:\n$ kubectl create namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create namespace oigns The output will look similar to the following:\nnamespace/oigns created Run the following command to tag the namespace so the WebLogic Kubernetes Operator can manage it:\n$ kubectl label namespaces \u0026lt;domain_namespace\u0026gt; weblogic-operator=enabled For example:\n$ kubectl label namespaces oigns weblogic-operator=enabled The output will look similar to the following:\nnamespace/oigns labeled Run the following command to check the label was created:\n$ kubectl describe namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe namespace oigns The output will look similar to the following:\nName: oigns Labels: weblogic-operator=enabled Annotations: \u0026lt;none\u0026gt; Status: Active No resource quota. No LimitRange resource. 
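As an optional extra check (not part of the original steps), you can also list every namespace that carries the label the operator watches for with its LabelSelector strategy, which is useful if you manage several domains:\n$ kubectl get namespaces -l weblogic-operator=enabled The output should include the oigns namespace labelled above. 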
Create a Kubernetes secret for the container registry In this section you create a secret that stores the credentials for the container registry where the OIG image is stored. This step must be followed if using Oracle Container Registry or your own private registry. If you are not using a container registry and have loaded the images on each of the master and worker nodes, you can skip this step.\n Run the following command to create the secret:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=\u0026lt;CONTAINER_REGISTRY\u0026gt; \\ --docker-username=\u0026#34;\u0026lt;USER_NAME\u0026gt;\u0026#34; \\ --docker-password=\u0026lt;PASSWORD\u0026gt; --docker-email=\u0026lt;EMAIL_ID\u0026gt; \\ --namespace=\u0026lt;domain_namespace\u0026gt; For example, if using Oracle Container Registry:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=container-registry.oracle.com \\ --docker-username=\u0026#34;user@example.com\u0026#34; \\ --docker-password=password --docker-email=user@example.com \\ --namespace=oigns Replace \u0026lt;USER_NAME\u0026gt; and \u0026lt;PASSWORD\u0026gt; with the credentials for the registry with the following caveats:\n If using Oracle Container Registry to pull the OIG container image, this is the username and password used to login to Oracle Container Registry. Before you can use this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oig_cpu and accept the license agreement.\n If using your own container registry to store the OIG container image, this is the username and password (or token) for your container registry.\n The output will look similar to the following:\nsecret/orclcred created RCU schema creation In this section you create the RCU schemas in the Oracle Database.\nBefore following the steps in this section, make sure that the database and listener are up and running and you can connect to the database via SQL*Plus or other client tool.\n If using Oracle Container Registry or your own container registry for your OIG container image, run the following command to create a helper pod to run RCU:\n$ kubectl run --image=\u0026lt;image_name-from-registry\u0026gt; --image-pull-policy=\u0026#34;IfNotPresent\u0026#34; --overrides=\u0026#39;{\u0026#34;apiVersion\u0026#34;: \u0026#34;v1\u0026#34;, \u0026#34;spec\u0026#34;:{\u0026#34;imagePullSecrets\u0026#34;: [{\u0026#34;name\u0026#34;: \u0026#34;orclcred\u0026#34;}]}}\u0026#39; helper -n \u0026lt;domain_namespace\u0026gt; -- sleep infinity For example:\n$ kubectl run --image=container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-220120.1359 --image-pull-policy=\u0026#34;IfNotPresent\u0026#34; --overrides=\u0026#39;{\u0026#34;apiVersion\u0026#34;: \u0026#34;v1\u0026#34;,\u0026#34;spec\u0026#34;:{\u0026#34;imagePullSecrets\u0026#34;: [{\u0026#34;name\u0026#34;: \u0026#34;orclcred\u0026#34;}]}}\u0026#39; helper -n oigns -- sleep infinity If you are not using a container registry and have loaded the image on each of the master and worker nodes, run the following command:\n$ kubectl run helper --image \u0026lt;image\u0026gt; -n oigns -- sleep infinity For example:\n$ kubectl run helper --image oracle/oig:12.2.1.4-jdk8-ol7-220120.1359 -n oigns -- sleep infinity The output will look similar to the following:\npod/helper created Run the following command to check the pod is running:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the 
following:\nNAME READY STATUS RESTARTS AGE helper 1/1 Running 0 3m Note: If you are pulling the image from a container registry, it may take several minutes before the pod has a STATUS of 1/1. While the pod is starting you can check its status by running the following command:\n$ kubectl describe pod helper -n oigns Run the following command to start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n oigns -- /bin/bash This will take you into a bash shell in the running helper pod:\n[oracle@helper oracle]$ In the helper bash shell run the following commands to set the environment:\n[oracle@helper oracle]$ export DB_HOST=\u0026lt;db_host.domain\u0026gt; [oracle@helper oracle]$ export DB_PORT=\u0026lt;db_port\u0026gt; [oracle@helper oracle]$ export DB_SERVICE=\u0026lt;service_name\u0026gt; [oracle@helper oracle]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; [oracle@helper oracle]$ export RCU_SCHEMA_PWD=\u0026lt;rcu_schema_pwd\u0026gt; [oracle@helper oracle]$ echo -e \u0026lt;db_pwd\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;rcu_schema_pwd\u0026gt; \u0026gt; /tmp/pwd.txt [oracle@helper oracle]$ cat /tmp/pwd.txt where:\n\u0026lt;db_host.domain\u0026gt; is the database server hostname\n\u0026lt;db_port\u0026gt; is the database listener port\n\u0026lt;service_name\u0026gt; is the database service name\n\u0026lt;rcu_schema_prefix\u0026gt; is the RCU schema prefix you want to set\n\u0026lt;rcu_schema_pwd\u0026gt; is the password you want to set for the \u0026lt;rcu_schema_prefix\u0026gt;\n\u0026lt;db_pwd\u0026gt; is the SYS password for the database\nFor example:\n[oracle@helper oracle]$ export DB_HOST=mydatabasehost.example.com [oracle@helper oracle]$ export DB_PORT=1521 [oracle@helper oracle]$ export DB_SERVICE=orcl.example.com [oracle@helper oracle]$ export RCUPREFIX=OIGK8S [oracle@helper oracle]$ export RCU_SCHEMA_PWD=\u0026lt;password\u0026gt; [oracle@helper oracle]$ echo -e \u0026lt;password\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;password\u0026gt; \u0026gt; /tmp/pwd.txt [oracle@helper oracle]$ cat /tmp/pwd.txt \u0026lt;password\u0026gt; \u0026lt;password\u0026gt; In the helper bash shell run the following commands to create the RCU schemas in the database:\n[oracle@helper oracle]$ /u01/oracle/oracle_common/bin/rcu -silent -createRepository -databaseType ORACLE -connectString \\ $DB_HOST:$DB_PORT/$DB_SERVICE -dbUser sys -dbRole sysdba -useSamePasswordForAllSchemaUsers true \\ -selectDependentsForComponents true -schemaPrefix $RCUPREFIX -component OIM -component MDS -component SOAINFRA -component OPSS \\ -f \u0026lt; /tmp/pwd.txt The output will look similar to the following:\nRCU Logfile: /tmp/RCU2022-03-09_17-09_964981565/logs/rcu.log Processing command line .... Repository Creation Utility - Checking Prerequisites Checking Global Prerequisites Repository Creation Utility - Checking Prerequisites Checking Component Prerequisites Repository Creation Utility - Creating Tablespaces Validating and Creating Tablespaces Create tablespaces in the repository database Repository Creation Utility - Create Repository Create in progress. 
Percent Complete: 10 Executing pre create operations Percent Complete: 25 Percent Complete: 25 Percent Complete: 26 Percent Complete: 27 Percent Complete: 28 Percent Complete: 28 Percent Complete: 29 Percent Complete: 29 Creating Common Infrastructure Services(STB) Percent Complete: 36 Percent Complete: 36 Percent Complete: 44 Percent Complete: 44 Percent Complete: 44 Creating Audit Services Append(IAU_APPEND) Percent Complete: 51 Percent Complete: 51 Percent Complete: 59 Percent Complete: 59 Percent Complete: 59 Creating Audit Services Viewer(IAU_VIEWER) Percent Complete: 66 Percent Complete: 66 Percent Complete: 67 Percent Complete: 67 Percent Complete: 68 Percent Complete: 68 Creating Metadata Services(MDS) Percent Complete: 76 Percent Complete: 76 Percent Complete: 76 Percent Complete: 77 Percent Complete: 77 Percent Complete: 78 Percent Complete: 78 Percent Complete: 78 Creating Weblogic Services(WLS) Percent Complete: 82 Percent Complete: 82 Percent Complete: 83 Percent Complete: 84 Percent Complete: 86 Percent Complete: 88 Percent Complete: 88 Percent Complete: 88 Creating User Messaging Service(UCSUMS) Percent Complete: 92 Percent Complete: 92 Percent Complete: 95 Percent Complete: 95 Percent Complete: 100 Creating Audit Services(IAU) Creating Oracle Platform Security Services(OPSS) Creating SOA Infrastructure(SOAINFRA) Creating Oracle Identity Manager(OIM) Executing post create operations Repository Creation Utility: Create - Completion Summary Database details: ----------------------------- Host Name : mydatabasehost.example.com Port : 1521 Service Name : ORCL.EXAMPLE.COM Connected As : sys Prefix for (prefixable) Schema Owners : OIGK8S RCU Logfile : /tmp/RCU2022-03-09_17-09_964981565/logs/rcu.log Component schemas created: ----------------------------- Component Status Logfile Common Infrastructure Services Success /tmp/RCU2022-03-09_17-09_964981565/logs/stb.log Oracle Platform Security Services Success /tmp/RCU2022-03-09_17-09_964981565/logs/opss.log SOA Infrastructure Success /tmp/RCU2022-03-09_17-09_964981565/logs/soainfra.log Oracle Identity Manager Success /tmp/RCU2022-03-09_17-09_964981565/logs/oim.log User Messaging Service Success /tmp/RCU2022-03-09_17-09_964981565/logs/ucsums.log Audit Services Success /tmp/RCU2022-03-09_17-09_964981565/logs/iau.log Audit Services Append Success /tmp/RCU2022-03-09_17-09_964981565/logs/iau_append.log Audit Services Viewer Success /tmp/RCU2022-03-09_17-09_964981565/logs/iau_viewer.log Metadata Services Success /tmp/RCU2022-03-09_17-09_964981565/logs/mds.log WebLogic Services Success /tmp/RCU2022-03-09_17-09_964981565/logs/wls.log Repository Creation Utility - Create : Operation Completed [oracle@helper oracle]$ Run the following command to patch schemas in the database:\nThis command should be run if you are using an OIG image that contains OIG bundle patches. 
If using an OIG image without OIG bundle patches, then you can skip this step.\n [oracle@helper oracle]$ /u01/oracle/oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin/ant \\ -f /u01/oracle/idm/server/setup/deploy-files/automation.xml \\ run-patched-sql-files \\ -logger org.apache.tools.ant.NoBannerLogger \\ -logfile /u01/oracle/idm/server/bin/patch_oim_wls.log \\ -DoperationsDB.host=$DB_HOST \\ -DoperationsDB.port=$DB_PORT \\ -DoperationsDB.serviceName=$DB_SERVICE \\ -DoperationsDB.user=${RCUPREFIX}_OIM \\ -DOIM.DBPassword=$RCU_SCHEMA_PWD \\ -Dojdbc=/u01/oracle/oracle_common/modules/oracle.jdbc/ojdbc8.jar The output will look similar to the following:\nBuildfile: /u01/oracle/idm/server/setup/deploy-files/automation.xml Verify the database was patched successfully by viewing the patch_oim_wls.log:\n[oracle@helper oracle]$ cat /u01/oracle/idm/server/bin/patch_oim_wls.log The output should look similar to below:\n... [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/StoredProcedures/OfflineDataPurge/oim_pkg_offline_datapurge_pkg_body.sql [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_dml_pty_insert_sysprop_RequestJustificationLocale.sql [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_dml_pty_insert_sysprop_reportee_chain_for_mgr.sql [sql] 36 of 36 SQL statements executed successfully BUILD SUCCESSFUL Total time: 5 second Exit the helper bash shell by issuing the command exit.\n Preparing the environment for domain creation In this section you prepare the environment for the OIG domain creation. This involves the following steps:\na. Creating Kubernetes secrets for the domain and RCU\nb. Create a Kubernetes persistent volume and persistent volume claim\nCreating Kubernetes secrets for the domain and RCU Create a Kubernetes secret for the domain using the create-weblogic-credentials script in the same Kubernetes namespace as the domain:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p \u0026lt;pwd\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -d \u0026lt;domain_uid\u0026gt; -s \u0026lt;kubernetes_domain_secret\u0026gt; where:\n-u weblogic is the WebLogic username\n-p \u0026lt;pwd\u0026gt; is the password for the WebLogic user\n-n \u0026lt;domain_namespace\u0026gt; is the domain namespace\n-d \u0026lt;domain_uid\u0026gt; is the domain UID to be created. The default is domain1 if not specified\n-s \u0026lt;kubernetes_domain_secret\u0026gt; is the name you want to create for the secret for this namespace. The default is to use the domainUID if not specified\nFor example:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-credentials $ ./create-weblogic-credentials.sh -u weblogic -p \u0026lt;password\u0026gt; -n oigns -d governancedomain -s oig-domain-credentials The output will look similar to the following:\nsecret/oig-domain-credentials created secret/oig-domain-credentials labeled The secret oig-domain-credentials has been successfully created in the oigns namespace. 
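If you want to understand what the script has done, it is broadly equivalent to creating an Opaque secret with username and password keys and then labelling it with the domain UID and domain name. A minimal sketch, assuming the example names used above, is:\n$ kubectl create secret generic oig-domain-credentials -n oigns \\ --from-literal=username=weblogic \\ --from-literal=password=\u0026lt;password\u0026gt; $ kubectl label secret oig-domain-credentials -n oigns \\ weblogic.domainUID=governancedomain weblogic.domainName=governancedomain Using the script is still the recommended approach; the sketch above is for illustration only. 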
Verify the secret is created using the following command:\n$ kubectl get secret \u0026lt;kubernetes_domain_secret\u0026gt; -o yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get secret oig-domain-credentials -o yaml -n oigns The output will look similar to the following:\n$ kubectl get secret oig-domain-credentials -o yaml -n oigns apiVersion: v1 data: password: V2VsY29tZTE= username: d2VibG9naWM= kind: Secret metadata: creationTimestamp: \u0026quot;2022-03-09T17:47:29Z\u0026quot; labels: weblogic.domainName: governancedomain weblogic.domainUID: governancedomain name: oig-domain-credentials namespace: oigns resourceVersion: \u0026quot;3216738\u0026quot; uid: c2ec07e0-0135-458d-bceb-c648d2a9ac54 type: Opaque Create a Kubernetes secret for RCU in the same Kubernetes namespace as the domain, using the create-weblogic-credentials.sh script:\n$ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u \u0026lt;rcu_prefix\u0026gt; -p \u0026lt;rcu_schema_pwd\u0026gt; -a sys -q \u0026lt;sys_db_pwd\u0026gt; -d \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; -s \u0026lt;kubernetes_rcu_secret\u0026gt; where:\n-u \u0026lt;rcu_prefix\u0026gt; is the name of the RCU schema prefix created previously\n-p \u0026lt;rcu_schema_pwd\u0026gt; is the password for the RCU schema prefix\n-a \u0026lt;sys_db_user\u0026gt; is the database user with sys dba privilege\n-q \u0026lt;sys_db_pwd\u0026gt; is the sys database password\n-d \u0026lt;domain_uid\u0026gt; is the domain_uid that you created earlier\n-n \u0026lt;domain_namespace\u0026gt; is the domain namespace\n-s \u0026lt;kubernetes_rcu_secret\u0026gt; is the name of the rcu secret to create\nFor example:\n$ cd $WORKDIR/kubernetes/create-rcu-credentials $ ./create-rcu-credentials.sh -u OIGK8S -p \u0026lt;password\u0026gt; -a sys -q \u0026lt;password\u0026gt; -d governancedomain -n oigns -s oig-rcu-credentials The output will look similar to the following:\nsecret/oig-rcu-credentials created secret/oig-rcu-credentials labeled The secret oig-rcu-credentials has been successfully created in the oigns namespace. Verify the secret is created using the following command:\n$ kubectl get secret \u0026lt;kubernetes_rcu_secret\u0026gt; -o yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get secret oig-rcu-credentials -o yaml -n oigns The output will look similar to the following:\napiVersion: v1 data: password: V2VsY29tZTE= sys_password: V2VsY29tZTE= sys_username: c3lz username: T0lHSzhT kind: Secret metadata: creationTimestamp: \u0026quot;2022-03-09T17:50:50Z\u0026quot; labels: weblogic.domainName: governancedomain weblogic.domainUID: governancedomain name: oig-rcu-credentials namespace: oigns resourceVersion: \u0026quot;3217023\u0026quot; uid: ce70b91a-fbbc-4839-9616-4cc2c1adeb4f type: Opaque Create a Kubernetes persistent volume and persistent volume claim As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.\nA persistent volume is the same as a disk mount but is inside a container. A Kubernetes persistent volume is an arbitrary name (determined in this case, by Oracle) that is mapped to a physical volume on a disk.\nWhen a container is started, it needs to mount that volume. The physical volume should be on a shared disk accessible by all the Kubernetes worker nodes because it is not known on which worker node the container will be started. 
In the case of Identity and Access Management, the persistent volume does not get erased when a container stops. This enables persistent configurations.\nThe example below uses an NFS mounted volume (\u0026lt;persistent_volume\u0026gt;/governancedomainpv). Other volume types can also be used. See the official Kubernetes documentation for Volumes.\nNote: The persistent volume directory needs to be accessible to both the master and worker node(s). Make sure this path has full access permissions, and that the folder is empty. In this example /scratch/shared/governancedomainpv is accessible from all nodes via NFS.\n Make a backup copy of the create-pv-pvc-inputs.yaml file and create required directories:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p \u0026lt;persistent_volume\u0026gt;/governancedomainpv $ chmod -R 777 \u0026lt;persistent_volume\u0026gt;/governancedomainpv For example:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc $ cp create-pv-pvc-inputs.yaml create-pv-pvc-inputs.yaml.orig $ mkdir output $ mkdir -p /scratch/shared/governancedomainpv $ chmod -R 777 /scratch/shared/governancedomainpv On the master node run the following command to ensure it is possible to read and write to the persistent volume:\ncd \u0026lt;persistent_volume\u0026gt;/governancedomainpv touch filemaster.txt ls filemaster.txt For example:\ncd /scratch/shared/governancedomainpv touch filemaster.txt ls filemaster.txt On the first worker node run the following to ensure it is possible to read and write to the persistent volume:\ncd /scratch/shared/governancedomainpv ls filemaster.txt touch fileworker1.txt ls fileworker1.txt Repeat the above for any other worker nodes, e.g. fileworker2.txt etc. Once proven that it\u0026rsquo;s possible to read and write from each node to the persistent volume, delete the files created.\n Navigate to $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc:\n$ cd $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc then edit the create-pv-pvc-inputs.yaml file and update the following parameters to reflect your settings. Save the file when complete:\nbaseName: \u0026lt;domain\u0026gt; domainUID: \u0026lt;domain_uid\u0026gt; namespace: \u0026lt;domain_namespace\u0026gt; weblogicDomainStorageType: NFS weblogicDomainStorageNFSServer: \u0026lt;nfs_server\u0026gt; weblogicDomainStoragePath: \u0026lt;physical_path_of_persistent_storage\u0026gt; weblogicDomainStorageSize: 10Gi For example:\n# The base name of the pv and pvc baseName: domain # Unique ID identifying a domain. # If left empty, the generated pv can be shared by multiple domains # This ID must not contain an underscore (\u0026quot;_\u0026quot;), and must be lowercase and unique across all domains in a Kubernetes cluster. domainUID: governancedomain # Name of the namespace for the persistent volume claim namespace: oigns # Persistent volume type for the persistent storage. # The value must be 'HOST_PATH' or 'NFS'. # If using 'NFS', weblogicDomainStorageNFSServer must be specified. weblogicDomainStorageType: NFS # The server name or ip address of the NFS server to use for the persistent storage. # The following line must be uncomment and customized if weblogicDomainStorateType is NFS: weblogicDomainStorageNFSServer: mynfsserver # Physical path of the persistent storage. # When weblogicDomainStorageType is set to HOST_PATH, this value should be set the to path to the # domain storage on the Kubernetes host. 
# When weblogicDomainStorageType is set to NFS, then weblogicDomainStorageNFSServer should be set # to the IP address or name of the DNS server, and this value should be set to the exported path # on that server. # Note that the path where the domain is mounted in the WebLogic containers is not affected by this # setting, that is determined when you create your domain. # The following line must be uncomment and customized: weblogicDomainStoragePath: /scratch/shared/governancedomainpv # Reclaim policy of the persistent storage # The valid values are: 'Retain', 'Delete', and 'Recycle' weblogicDomainStorageReclaimPolicy: Retain # Total storage allocated to the persistent storage. weblogicDomainStorageSize: 10Gi Execute the create-pv-pvc.sh script to create the PV and PVC configuration files:\n$ ./create-pv-pvc.sh -i create-pv-pvc-inputs.yaml -o output The output will be similar to the following:\nInput parameters being used export version=\u0026quot;create-weblogic-sample-domain-pv-pvc-inputs-v1\u0026quot; export baseName=\u0026quot;domain\u0026quot; export domainUID=\u0026quot;governancedomain\u0026quot; export namespace=\u0026quot;oigns\u0026quot; export weblogicDomainStorageType=\u0026quot;NFS\u0026quot; export weblogicDomainStorageNFSServer=\u0026quot;mynfsserver\u0026quot; export weblogicDomainStoragePath=\u0026quot;/scratch/shared/governancedomainpv\u0026quot; export weblogicDomainStorageReclaimPolicy=\u0026quot;Retain\u0026quot; export weblogicDomainStorageSize=\u0026quot;10Gi\u0026quot; Generating output/pv-pvcs/governancedomain-domain-pv.yaml Generating output/pv-pvcs/governancedomain-domain-pvc.yaml The following files were generated: output/pv-pvcs/governancedomain-domain-pv.yaml output/pv-pvcs/governancedomain-domain-pvc.yaml Completed Run the following to show the files are created:\n$ ls output/pv-pvcs create-pv-pvc-inputs.yaml governancedomain-domain-pv.yaml governancedomain-domain-pvc.yaml Run the following kubectl command to create the PV and PVC in the domain namespace:\n$ kubectl create -f output/pv-pvcs/governancedomain-domain-pv.yaml -n \u0026lt;domain_namespace\u0026gt; $ kubectl create -f output/pv-pvcs/governancedomain-domain-pvc.yaml -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create -f output/pv-pvcs/governancedomain-domain-pv.yaml -n oigns $ kubectl create -f output/pv-pvcs/governancedomain-domain-pvc.yaml -n oigns The output will look similar to the following:\npersistentvolume/governancedomain-domain-pv created persistentvolumeclaim/governancedomain-domain-pvc created Run the following commands to verify the PV and PVC were created successfully:\n$ kubectl describe pv \u0026lt;pv_name\u0026gt; $ kubectl describe pvc \u0026lt;pvc_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe pv governancedomain-domain-pv $ kubectl describe pvc governancedomain-domain-pvc -n oigns The output will look similar to the following:\n$ kubectl describe pv governancedomain-domain-pv Name: governancedomain-domain-pv Labels: weblogic.domainUID=governancedomain Annotations: pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pv-protection] StorageClass: governancedomain-domain-storage-class Status: Bound Claim: oigns/governancedomain-domain-pvc Reclaim Policy: Retain Access Modes: RWX VolumeMode: Filesystem Capacity: 10Gi Node Affinity: \u0026lt;none\u0026gt; Message: Source: Type: NFS (an NFS mount that lasts the lifetime of a pod) Server: mynfsserver Path: /scratch/shared/governancedomainpv ReadOnly: false Events: 
\u0026lt;none\u0026gt; $ kubectl describe pvc governancedomain-domain-pvc -n oigns Name: governancedomain-domain-pvc Namespace: oigns StorageClass: governancedomain-domain-storage-class Status: Bound Volume: governancedomain-domain-pv Labels: weblogic.domainUID=governancedomain Annotations: pv.kubernetes.io/bind-completed: yes pv.kubernetes.io/bound-by-controller: yes Finalizers: [kubernetes.io/pvc-protection] Capacity: 10Gi Access Modes: RWX VolumeMode: Filesystem Mounted By: \u0026lt;none\u0026gt; Events: \u0026lt;none\u0026gt; You are now ready to create the OIG domain as per Create OIG Domains\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/manage-oig-domains/running-oig-utilities/", + "title": "Running OIG utilities", + "tags": [], + "description": "Describes the steps for running OIG utilities in Kubernetes.", + "content": "Run OIG utilities inside the OIG Kubernetes cluster.\nRun utilities in an interactive bash shell Access a bash shell inside the \u0026lt;domain_uid\u0026gt;-oim-server1 pod:\n$ kubectl -n oigns exec -it \u0026lt;domain_uid\u0026gt;-oim-server1 -- bash For example:\n$ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash This will take you into a bash shell in the running \u0026lt;domain_uid\u0026gt;-oim-server1 pod:\n[oracle@governancedomain-oim-server1 oracle]$ Navigate to the /u01/oracle/idm/server/bin directory and execute the utility as required. For example:\n[oracle@governancedomain-oim-server1 oracle] cd /u01/oracle/idm/server/bin [oracle@governancedomain-oim-server1 bin]$ ./\u0026lt;filename\u0026gt;.sh Note: Some utilities such as PurgeCache.sh, GenerateSnapshot.sh etc., may prompt to enter the t3 URL, for example:\n[oracle@governancedomain-oim-server1 bin]$ sh GenerateSnapshot.sh For running the Utilities the following environment variables need to be set APP_SERVER is weblogic OIM_ORACLE_HOME is /u01/oracle/idm/ JAVA_HOME is /u01/jdk MW_HOME is /u01/oracle WL_HOME is /u01/oracle/wlserver DOMAIN_HOME is /u01/oracle/user_projects/domains/governancedomain Executing -Dweblogic.security.SSL.trustedCAKeyStore= in IPv4 mode [Enter Xellerate admin username :]xelsysadm [Enter password for xelsysadm :] [Threads to use [ 8 ]] [Enter serverURL :[t3://oimhostname:oimportno ]] To find the t3 URL run:\n$ kubectl get services -n oigns | grep oim-cluster The output will look similar to the following:\ngovernancedomain-cluster-oim-cluster ClusterIP 10.110.161.82 \u0026lt;none\u0026gt; 14002/TCP,14000/TCP 4d In this case the t3 URL is: t3://governancedomain-cluster-oim-cluster:14000.\n Passing inputs as a jar/xml file Copy the input file to be passed to the utility to a directory of your choice.\n Run the following command to copy the input file to the running governancedomain-oim-server1 pod.\n$ kubectl -n oigns cp /\u0026lt;path\u0026gt;/\u0026lt;inputFile\u0026gt; governancedomain-oim-server1:/u01/oracle/idm/server/bin/ Access a bash shell inside the governancedomain-oim-server1 pod:\n$ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash This will take you into a bash shell in the running governancedomain-oim-server1 pod:\n[oracle@governancedomain-oim-server1 oracle]$ Navigate to the /u01/oracle/idm/server/bin directory and execute the utility as required, passing the input file. 
For example:\n[oracle@governancedomain-oim-server1 oracle] cd /u01/oracle/idm/server/bin [oracle@governancedomain-oim-server1 bin]$ ./\u0026lt;filename\u0026gt;.sh -inputFile \u0026lt;inputFile\u0026gt; Note: As pods are stateless, the copied input file will only remain until the pod restarts.\n Editing property/profile files To edit a property/profile file in the Kubernetes cluster:\n Copy the input file from the pod to a directory on the local system, for example:\n$ kubectl -n oigns cp governancedomain-oim-server1:/u01/oracle/idm/server/bin/\u0026lt;file.properties_profile\u0026gt; /\u0026lt;path\u0026gt;/\u0026lt;file.properties_profile\u0026gt; Note: If you see the message tar: Removing leading '/' from member names, this can be ignored.\n Edit the /\u0026lt;path\u0026gt;/\u0026lt;file.properties_profile\u0026gt; in an editor of your choice.\n Copy the file back to the pod:\n$ kubectl -n oigns cp /\u0026lt;path\u0026gt;/\u0026lt;file.properties_profile\u0026gt; governancedomain-oim-server1:/u01/oracle/idm/server/bin/ Note: As pods are stateless, the copied file will only remain until the pod restarts. Preserve a local copy in case you need to copy files back after a pod restart.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/create-oam-domains/", + "title": "Create OAM domains", + "tags": [], + "description": "Sample for creating an OAM domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OAM domain.", + "content": " Introduction\n Prerequisites\n Prepare the create domain script\n Run the create domain script\n Set the OAM server memory parameters\n Initializing the domain\n Verify the results\na. Verify the domain, pods and services\nb. Verify the domain\nc. Verify the pods\n Introduction The OAM deployment scripts demonstrate the creation of an OAM domain home on an existing Kubernetes persistent volume (PV) and persistent volume claim (PVC). The scripts also generate the domain YAML file, which can then be used to start the Kubernetes artifacts of the corresponding domain.\nPrerequisites Before you begin, perform the following steps:\n Review the Domain resource documentation. Ensure that you have executed all the preliminary steps documented in Prepare your environment. Ensure that the database is up and running. Prepare the create domain script The sample scripts for Oracle Access Management domain deployment are available at $WORKDIR/kubernetes/create-access-domain.\n Make a copy of the create-domain-inputs.yaml file:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv $ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig Edit the create-domain-inputs.yaml and modify the following parameters. 
Save the file when complete:\ndomainUID: \u0026lt;domain_uid\u0026gt; domainHome: /u01/oracle/user_projects/domains/\u0026lt;domain_uid\u0026gt; image: \u0026lt;image_name\u0026gt;:\u0026lt;tag\u0026gt; imagePullSecretName: \u0026lt;container_registry_secret\u0026gt; weblogicCredentialsSecretName: \u0026lt;kubernetes_domain_secret\u0026gt; logHome: /u01/oracle/user_projects/domains/logs/\u0026lt;domain_uid\u0026gt; namespace: \u0026lt;domain_namespace\u0026gt; persistentVolumeClaimName: \u0026lt;pvc_name\u0026gt; rcuSchemaPrefix: \u0026lt;rcu_prefix\u0026gt; rcuDatabaseURL: \u0026lt;rcu_db_host\u0026gt;:\u0026lt;rcu_db_port\u0026gt;/\u0026lt;rcu_db_service_name\u0026gt; rcuCredentialsSecret: \u0026lt;kubernetes_rcu_secret\u0026gt; Note : imagePullSecretName is not required if you are not using a container registry.\nFor example:\ndomainUID: accessdomain domainHome: /u01/oracle/user_projects/domains/accessdomain image: container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-220119.2059 imagePullSecretName: orclcred weblogicCredentialsSecretName: accessdomain-credentials logHome: /u01/oracle/user_projects/domains/logs/accessdomain namespace: oamns persistentVolumeClaimName: accessdomain-domain-pvc rcuSchemaPrefix: OAMK8S rcuDatabaseURL: mydatabasehost.example.com:1521/orcl.example.com rcuCredentialsSecret: accessdomain-rcu-credentials A full list of parameters in the create-domain-inputs.yaml file are shown below:\n Parameter Definition Default adminPort Port number for the Administration Server inside the Kubernetes cluster. 7001 adminNodePort Port number of the Administration Server outside the Kubernetes cluster. 30701 adminServerName Name of the Administration Server. AdminServer clusterName Name of the WebLogic cluster instance to generate for the domain. By default the cluster name is oam_cluster for the OAM domain. oam_cluster configuredManagedServerCount Number of Managed Server instances to generate for the domain. 5 createDomainFilesDir Directory on the host machine to locate all the files to create a WebLogic domain, including the script that is specified in the createDomainScriptName property. By default, this directory is set to the relative path wlst, and the create script will use the built-in WLST offline scripts in the wlst directory to create the WebLogic domain. It can also be set to the relative path wdt, and then the built-in WDT scripts will be used instead. An absolute path is also supported to point to an arbitrary directory in the file system. The built-in scripts can be replaced by the user-provided scripts or model files as long as those files are in the specified directory. Files in this directory are put into a Kubernetes config map, which in turn is mounted to the createDomainScriptsMountPath, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. wlst createDomainScriptsMountPath Mount path where the create domain scripts are located inside a pod. The create-domain.sh script creates a Kubernetes job to run the script (specified in the createDomainScriptName property) in a Kubernetes pod to create a domain home. Files in the createDomainFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. /u01/weblogic createDomainScriptName Script that the create domain script uses to create a WebLogic domain. The create-domain.sh script creates a Kubernetes job to run this script to create a domain home. 
The script is located in the in-pod directory that is specified in the createDomainScriptsMountPath property. If you need to provide your own scripts to create the domain home, instead of using the built-it scripts, you must use this property to set the name of the script that you want the create domain job to run. create-domain-job.sh domainHome Home directory of the OAM domain. If not specified, the value is derived from the domainUID as /shared/domains/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/accessinfra domainPVMountPath Mount path of the domain persistent volume. /u01/oracle/user_projects domainUID Unique ID that will be used to identify this particular domain. Used as the name of the generated WebLogic domain as well as the name of the Kubernetes domain resource. This ID must be unique across all domains in a Kubernetes cluster. This ID cannot contain any character that is not valid in a Kubernetes service name. accessinfra domainType Type of the domain. Mandatory input for OAM domains. You must provide one of the supported domain type value: oam (deploys an OAM domain) oam exposeAdminNodePort Boolean indicating if the Administration Server is exposed outside of the Kubernetes cluster. false exposeAdminT3Channel Boolean indicating if the T3 administrative channel is exposed outside the Kubernetes cluster. true image OAM container image. The operator requires OAM 12.2.1.4. Refer to Obtain the OAM container image for details on how to obtain or create the image. oracle/oam:12.2.1.4.0 imagePullPolicy WebLogic container image pull policy. Legal values are IfNotPresent, Always, or Never IfNotPresent imagePullSecretName Name of the Kubernetes secret to access the container registry to pull the OAM container image. The presence of the secret will be validated when this parameter is specified. includeServerOutInPodLog Boolean indicating whether to include the server .out to the pod\u0026rsquo;s stdout. true initialManagedServerReplicas Number of Managed Servers to initially start for the domain. 2 javaOptions Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following pre-defined variables to obtain WebLogic domain information: $(DOMAIN_NAME), $(DOMAIN_HOME), $(ADMIN_NAME), $(ADMIN_PORT), and $(SERVER_NAME). -Dweblogic.StdoutDebugEnabled=false logHome The in-pod location for the domain log, server logs, server out, and Node Manager log files. If not specified, the value is derived from the domainUID as /shared/logs/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/logs/accessinfra managedServerNameBase Base string used to generate Managed Server names. oam_server managedServerPort Port number for each Managed Server. 8001 namespace Kubernetes namespace in which to create the domain. accessns persistentVolumeClaimName Name of the persistent volume claim created to host the domain home. If not specified, the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-sample-pvc. accessinfra-domain-pvc productionModeEnabled Boolean indicating if production mode is enabled for the domain. true serverStartPolicy Determines which WebLogic Server instances will be started. Legal values are NEVER, IF_NEEDED, ADMIN_ONLY. IF_NEEDED t3ChannelPort Port for the T3 channel of the NetworkAccessPoint. 30012 t3PublicAddress Public address for the T3 channel. This should be set to the public address of the Kubernetes cluster. This would typically be a load balancer address. 
For development environments only: In a single server (all-in-one) Kubernetes deployment, this may be set to the address of the master, or at the very least, it must be set to the address of one of the worker nodes. If not provided, the script will attempt to set it to the IP address of the Kubernetes cluster weblogicCredentialsSecretName Name of the Kubernetes secret for the Administration Server\u0026rsquo;s user name and password. If not specified, then the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-credentials. accessinfra-domain-credentials weblogicImagePullSecretName Name of the Kubernetes secret for the container registry, used to pull the WebLogic Server image. serverPodCpuRequest, serverPodMemoryRequest, serverPodCpuCLimit, serverPodMemoryLimit The maximum amount of compute resources allowed, and minimum amount of compute resources required, for each server pod. Please refer to the Kubernetes documentation on Managing Compute Resources for Containers for details. Resource requests and resource limits are not specified. rcuSchemaPrefix The schema prefix to use in the database, for example OAM1. You may wish to make this the same as the domainUID in order to simplify matching domains to their RCU schemas. OAM1 rcuDatabaseURL The database URL. oracle-db.default.svc.cluster.local:1521/devpdb.k8s rcuCredentialsSecret The Kubernetes secret containing the database credentials. accessinfra-rcu-credentials Note that the names of the Kubernetes resources in the generated YAML files may be formed with the value of some of the properties specified in the create-inputs.yaml file. Those properties include the adminServerName, clusterName and managedServerNameBase. If those values contain any characters that are invalid in a Kubernetes service name, those characters are converted to valid values in the generated YAML files. For example, an uppercase letter is converted to a lowercase letter and an underscore (\u0026quot;_\u0026quot;) is converted to a hyphen (\u0026quot;-\u0026quot;).\nThe sample demonstrates how to create an OAM domain home and associated Kubernetes resources for a domain that has one cluster only. In addition, the sample provides the capability for users to supply their own scripts to create the domain home for other use cases. 
The generated domain YAML file could also be modified to cover more use cases.\nRun the create domain script Run the create domain script, specifying your inputs file and an output directory to store the generated artifacts:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv $ ./create-domain.sh -i create-domain-inputs.yaml -o /\u0026lt;path to output-directory\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv $ ./create-domain.sh -i create-domain-inputs.yaml -o output The output will look similar to the following:\nInput parameters being used export version=\u0026quot;create-weblogic-sample-domain-inputs-v1\u0026quot; export adminPort=\u0026quot;7001\u0026quot; export adminServerName=\u0026quot;AdminServer\u0026quot; export domainUID=\u0026quot;accessdomain\u0026quot; export domainType=\u0026quot;oam\u0026quot; export domainHome=\u0026quot;/u01/oracle/user_projects/domains/accessdomain\u0026quot; export serverStartPolicy=\u0026quot;IF_NEEDED\u0026quot; export clusterName=\u0026quot;oam_cluster\u0026quot; export configuredManagedServerCount=\u0026quot;5\u0026quot; export initialManagedServerReplicas=\u0026quot;2\u0026quot; export managedServerNameBase=\u0026quot;oam_server\u0026quot; export managedServerPort=\u0026quot;14100\u0026quot; export image=\u0026quot;container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-220119.2059\u0026quot; export imagePullPolicy=\u0026quot;IfNotPresent\u0026quot; export imagePullSecretName=\u0026quot;orclcred\u0026quot; export productionModeEnabled=\u0026quot;true\u0026quot; export weblogicCredentialsSecretName=\u0026quot;accessdomain-credentials\u0026quot; export includeServerOutInPodLog=\u0026quot;true\u0026quot; export logHome=\u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain\u0026quot; export httpAccessLogInLogHome=\u0026quot;true\u0026quot; export t3ChannelPort=\u0026quot;30012\u0026quot; export exposeAdminT3Channel=\u0026quot;false\u0026quot; export adminNodePort=\u0026quot;30701\u0026quot; export exposeAdminNodePort=\u0026quot;false\u0026quot; export namespace=\u0026quot;oamns\u0026quot; javaOptions=-Dweblogic.StdoutDebugEnabled=false export persistentVolumeClaimName=\u0026quot;accessdomain-domain-pvc\u0026quot; export domainPVMountPath=\u0026quot;/u01/oracle/user_projects/domains\u0026quot; export createDomainScriptsMountPath=\u0026quot;/u01/weblogic\u0026quot; export createDomainScriptName=\u0026quot;create-domain-job.sh\u0026quot; export createDomainFilesDir=\u0026quot;wlst\u0026quot; export rcuSchemaPrefix=\u0026quot;OAMK8S\u0026quot; export rcuDatabaseURL=\u0026quot;mydatabasehost.example.com:1521/orcl.example.com\u0026quot; export rcuCredentialsSecret=\u0026quot;accessdomain-rcu-credentials\u0026quot; createFiles - valuesInputFile is create-domain-inputs.yaml createDomainScriptName is create-domain-job.sh Generating output/weblogic-domains/accessdomain/create-domain-job.yaml Generating output/weblogic-domains/accessdomain/delete-domain-job.yaml Generating output/weblogic-domains/accessdomain/domain.yaml Checking to see if the secret accessdomain-credentials exists in namespace oamns configmap/accessdomain-create-oam-infra-domain-job-cm created Checking the configmap accessdomain-create-oam-infra-domain-job-cm was created configmap/accessdomain-create-oam-infra-domain-job-cm labeled Checking if object type job with name accessdomain-create-oam-infra-domain-job exists No resources found in oamns namespace. 
Creating the domain by creating the job output/weblogic-domains/accessdomain/create-domain-job.yaml job.batch/accessdomain-create-oam-infra-domain-job created Waiting for the job to complete... status on iteration 1 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 2 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 3 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 4 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 5 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Running status on iteration 6 of 20 pod accessdomain-create-oam-infra-domain-job-6tgw4 status is Completed Domain accessdomain was created and will be started by the WebLogic Kubernetes Operator The following files were generated: output/weblogic-domains/accessdomain/create-domain-inputs.yaml output/weblogic-domains/accessdomain/create-domain-job.yaml output/weblogic-domains/accessdomain/domain.yaml Note: If the domain creation fails, refer to the Troubleshooting section.\nThe command creates a domain.yaml file required for domain creation.\n Set the OAM server memory parameters By default, the java memory parameters assigned to the oam_server cluster are very small. The minimum recommended values are -Xms4096m -Xmx8192m. However, Oracle recommends you to set these to -Xms8192m -Xmx8192m in a production environment.\n Navigate to the /output/weblogic-domains/\u0026lt;domain_uid\u0026gt; directory:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/\u0026lt;domain_uid\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain Edit the domain.yaml file and locate the section of the file starting with: - clusterName: oam_cluster. Immediately after the line: topologyKey: \u0026quot;kubernetes.io/hostname\u0026quot;, add the following lines:\nenv: - name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m\u0026quot; For example:\n - clusterName: oam_cluster serverService: precreateService: true serverStartState: \u0026quot;RUNNING\u0026quot; serverPod: # Instructs Kubernetes scheduler to prefer nodes for new cluster members where there are not # already members of the same cluster. affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchExpressions: - key: \u0026quot;weblogic.clusterName\u0026quot; operator: In values: - $(CLUSTER_NAME) topologyKey: \u0026quot;kubernetes.io/hostname\u0026quot; env:\t- name: USER_MEM_ARGS value: \u0026quot;-XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m\u0026quot; replicas: 2 In the domain.yaml locate the section of the file starting with adminServer:. Under the env: tag add the following CLASSPATH entries. 
This is required for running the idmconfigtool from the Administration Server.\n- name: CLASSPATH value: \u0026quot;/u01/oracle/wlserver/server/lib/weblogic.jar\u0026quot; For example:\n adminServer: # serverStartState legal values are \u0026quot;RUNNING\u0026quot; or \u0026quot;ADMIN\u0026quot; # \u0026quot;RUNNING\u0026quot; means the listed server will be started up to \u0026quot;RUNNING\u0026quot; mode # \u0026quot;ADMIN\u0026quot; means the listed server will be start up to \u0026quot;ADMIN\u0026quot; mode serverStartState: \u0026quot;RUNNING\u0026quot; adminService: channels: # The Admin Server's NodePort - channelName: default nodePort: 30701 # Uncomment to export the T3Channel as a service - channelName: T3Channel serverPod: # an (optional) list of environment variable to be set on the admin servers env: - name: USER_MEM_ARGS value: \u0026quot;-Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m \u0026quot; - name: CLASSPATH value: \u0026quot;/u01/oracle/wlserver/server/lib/weblogic.jar\u0026quot; If required, you can add the optional parameter maxClusterConcurrentStartup to the spec section of the domain.yaml. This parameter specifies the number of managed servers to be started in sequence per cluster. For example if you updated the initialManagedServerReplicas to 4 in create-domain-inputs.yaml and only had 2 nodes, then setting maxClusterConcurrentStartup: 1 will start one managed server at a time on each node, rather than starting them all at once. This can be useful to take the strain off individual nodes at startup. Below is an example with the parameter added:\napiVersion: \u0026quot;weblogic.oracle/v8\u0026quot; kind: Domain metadata: name: accessdomain namespace: oamns labels: weblogic.domainUID: accessdomain spec: # The WebLogic Domain Home domainHome: /u01/oracle/user_projects/domains/accessdomain maxClusterConcurrentStartup: 1 # The domain home source type # Set to PersistentVolume for domain-in-pv, Image for domain-in-image, or FromModel for model-in-image domainHomeSourceType: PersistentVolume # The WebLogic Server container image that the Operator uses to start the domain image: \u0026quot;oracle/oam:12.2.1.4.0\u0026quot; .... 
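As an optional check after saving the changes in the next step, you can confirm the edited file still parses as valid YAML before applying it, for example with a client-side dry run (a sketch, assuming kubectl 1.18 or later and the accessdomain example paths):\n$ kubectl apply -f $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain/domain.yaml --dry-run=client -n oamns No resources are created or changed by this command. 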
Save the changes to domain.yaml.\n Initializing the domain Create the Kubernetes resource using the following command:\n$ kubectl apply -f $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/\u0026lt;domain_uid\u0026gt;/domain.yaml For example:\n$ kubectl apply -f $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain/domain.yaml The output will look similar to the following:\ndomain.weblogic.oracle/accessdomain created Verify the results Verify the domain, pods and services Verify the domain, server pods and services are created and in the READY state with a status of 1/1 by running the following command:\n$ kubectl get all,domains -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get all,domains -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/accessdomain-adminserver 1/1 Running 0 11m pod/accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 18m pod/accessdomain-oam-policy-mgr1 1/1 Running 0 3m31s pod/accessdomain-oam-policy-mgr2 1/1 Running 0 3m31s pod/accessdomain-oam-server1 1/1 Running 0 3m31s pod/accessdomain-oam-server2 1/1 Running 0 3m31s pod/helper 1/1 Running 0 33m NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/accessdomain-adminserver ClusterIP None \u0026lt;none\u0026gt; 7001/TCP 11m service/accessdomain-cluster-oam-cluster ClusterIP 10.101.59.154 \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-cluster-policy-cluster ClusterIP 10.98.236.51 \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr1 ClusterIP None \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr2 ClusterIP None \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr3 ClusterIP 10.96.244.37 \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr4 ClusterIP 10.105.201.23 \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-policy-mgr5 ClusterIP 10.110.12.227 \u0026lt;none\u0026gt; 15100/TCP 3m31s service/accessdomain-oam-server1 ClusterIP None \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-oam-server2 ClusterIP None \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-oam-server3 ClusterIP 10.103.178.35 \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-oam-server4 ClusterIP 10.97.254.78 \u0026lt;none\u0026gt; 14100/TCP 3m31s service/accessdomain-oam-server5 ClusterIP 10.105.65.104 \u0026lt;none\u0026gt; 14100/TCP 3m31s NAME COMPLETIONS DURATION AGE job.batch/accessdomain-create-oam-infra-domain-job 1/1 2m6s 18m NAME AGE domain.weblogic.oracle/accessdomain 12m Note: It will take several minutes before all the services listed above are shown. When a pod has a STATUS of 0/1 the pod is started but the OAM server associated with it is currently starting. While the pods are starting you can check the startup status in the pod logs by running the following command:\n$ kubectl logs accessdomain-adminserver -n oamns $ kubectl logs accessdomain-oam-policy-mgr1 -n oamns $ kubectl logs accessdomain-oam-server1 -n oamns etc. The default domain created by the script has the following characteristics:\n An Administration Server named AdminServer listening on port 7001. A configured OAM cluster named oam_cluster of size 5. A configured Policy Manager cluster named policy_cluster of size 5. Two started OAM Managed Servers, named oam_server1 and oam_server2, listening on port 14100. 
Two started Policy Manager managed servers named oam-policy-mgr1 and oam-policy-mgr2, listening on port 15100. Log files that are located in \u0026lt;persistent_volume\u0026gt;/logs/\u0026lt;domainUID\u0026gt;. Verify the domain Run the following command to describe the domain:\n$ kubectl describe domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe domain accessdomain -n oamns The output will look similar to the following:\nName: accessdomain Namespace: oamns Labels: weblogic.domainUID=accessdomain Annotations: \u0026lt;none\u0026gt; API Version: weblogic.oracle/v8 Kind: Domain Metadata: Creation Timestamp: 2022-03-07T11:59:51Z Generation: 1 Managed Fields: API Version: weblogic.oracle/v8 Fields Type: FieldsV1 fieldsV1: f:status: .: f:clusters: f:conditions: f:introspectJobFailureCount: f:servers: f:startTime: Manager: Kubernetes Java Client Operation: Update Time: 2022-03-07T11:59:51Z API Version: weblogic.oracle/v8 Fields Type: FieldsV1 fieldsV1: f:metadata: f:annotations: .: f:kubectl.kubernetes.io/last-applied-configuration: f:labels: .: f:weblogic.domainUID: Manager: kubectl-client-side-apply Operation: Update Time: 2022-03-07T11:59:51Z Resource Version: 1495179 UID: a90107d5-dbaf-4d86-9439-d5369faabd35 Spec: Admin Server: Server Pod: Env: Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m Name: CLASSPATH Value: /u01/oracle/wlserver/server/lib/weblogic.jar Server Start State: RUNNING Clusters: Cluster Name: policy_cluster Replicas: 2 Server Pod: Affinity: Pod Anti Affinity: Preferred During Scheduling Ignored During Execution: Pod Affinity Term: Label Selector: Match Expressions: Key: weblogic.clusterName Operator: In Values: $(CLUSTER_NAME) Topology Key: kubernetes.io/hostname Weight: 100 Server Service: Precreate Service: true Server Start State: RUNNING Cluster Name: oam_cluster Replicas: 2 Server Pod: Affinity: Pod Anti Affinity: Preferred During Scheduling Ignored During Execution: Pod Affinity Term: Label Selector: Match Expressions: Key: weblogic.clusterName Operator: In Values: $(CLUSTER_NAME) Topology Key: kubernetes.io/hostname Weight: 100 Env: Name: USER_MEM_ARGS Value: -XX:+UseContainerSupport -Djava.security.egd=file:/dev/./urandom -Xms8192m -Xmx8192m Server Service: Precreate Service: true Server Start State: RUNNING Data Home: Domain Home: /u01/oracle/user_projects/domains/accessdomain Domain Home Source Type: PersistentVolume Http Access Log In Log Home: true Image: container-registry.oracle.com/middleware/oam_cpu:12.2.1.4-jdk8-ol7-220119.2059 Image Pull Policy: IfNotPresent Image Pull Secrets: Name: orclcred Include Server Out In Pod Log: true Log Home: /u01/oracle/user_projects/domains/logs/accessdomain Log Home Enabled: true Server Pod: Env: Name: JAVA_OPTIONS Value: -Dweblogic.StdoutDebugEnabled=false Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m Volume Mounts: Mount Path: /u01/oracle/user_projects/domains Name: weblogic-domain-storage-volume Volumes: Name: weblogic-domain-storage-volume Persistent Volume Claim: Claim Name: accessdomain-domain-pvc Server Start Policy: IF_NEEDED Web Logic Credentials Secret: Name: accessdomain-credentials Status: Clusters: Cluster Name: oam_cluster Maximum Replicas: 5 Minimum Replicas: 0 Ready Replicas: 2 Replicas: 2 Replicas Goal: 2 Cluster Name: policy_cluster Maximum Replicas: 5 Minimum Replicas: 0 Ready Replicas: 2 Replicas: 2 Replicas Goal: 2 Conditions: Last Transition Time: 
2022-03-07T12:11:52.623959Z Reason: ServersReady Status: True Type: Available Introspect Job Failure Count: 0 Servers: Desired State: RUNNING Health: Activation Time: 2022-03-07T12:08:29.271000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.42.252 Server Name: AdminServer State: RUNNING Cluster Name: oam_cluster Desired State: RUNNING Health: Activation Time: 2022-03-07T12:11:02.696000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.42.255 Server Name: oam_server1 State: RUNNING Cluster Name: oam_cluster Desired State: RUNNING Health: Activation Time: 2022-03-07T12:11:46.175000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.42.252 Server Name: oam_server2 State: RUNNING Cluster Name: oam_cluster Desired State: SHUTDOWN Server Name: oam_server3 Cluster Name: oam_cluster Desired State: SHUTDOWN Server Name: oam_server4 Cluster Name: oam_cluster Desired State: SHUTDOWN Server Name: oam_server5 Cluster Name: policy_cluster Desired State: RUNNING Health: Activation Time: 2022-03-07T12:11:20.404000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.42.255 Server Name: oam_policy_mgr1 State: RUNNING Cluster Name: policy_cluster Desired State: RUNNING Health: Activation Time: 2022-03-07T12:11:09.719000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.42.252 Server Name: oam_policy_mgr2 State: RUNNING Cluster Name: policy_cluster Desired State: SHUTDOWN Server Name: oam_policy_mgr3 Cluster Name: policy_cluster Desired State: SHUTDOWN Server Name: oam_policy_mgr4 Cluster Name: policy_cluster Desired State: SHUTDOWN Server Name: oam_policy_mgr5 Start Time: 2022-03-07T11:59:51.682687Z Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal DomainCreated 13m weblogic.operator Domain resource accessdomain was created Normal DomainProcessingStarting 5m9s (x2 over 13m) weblogic.operator Creating or updating Kubernetes presence for WebLogic Domain with UID accessdomain Normal DomainProcessingCompleted 114s weblogic.operator Successfully completed processing domain resource accessdomain In the Status section of the output, the available servers and clusters are listed.\n Verify the pods Run the following command to see the pods running the servers and which nodes they are running on:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; -o wide For example:\n$ kubectl get pods -n oamns -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES accessdomain-adminserver 1/1 Running 0 18m 10.244.6.63 10.250.42.252 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 25m 10.244.6.61 10.250.42.252 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; accessdomain-oam-policy-mgr1 1/1 Running 0 10m 10.244.5.13 10.250.42.255 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; accessdomain-oam-policy-mgr2 1/1 Running 0 10m 10.244.6.65 10.250.42.252 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; accessdomain-oam-server1 1/1 Running 0 10m 10.244.5.12 10.250.42.255 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; accessdomain-oam-server2 1/1 Running 0 10m 10.244.6.64 10.250.42.252 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; helper 1/1 Running 0 40m 10.244.6.60 10.250.42.252 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; You are now ready to configure an Ingress to direct traffic for your 
OAM domain as per Configure an Ingress for an OAM domain.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/create-oig-domains/", + "title": "Create OIG domains", + "tags": [], + "description": "Sample for creating an OIG domain home on an existing PV or PVC, and the domain resource YAML file for deploying the generated OIG domain.", + "content": " Introduction\n Prerequisites\n Prepare the create domain script\n Run the create domain script\na. Generate the create domain script\nb. Setting the OIM server memory parameters\nc. Run the create domain scripts\n Verify the results\na. Verify the domain, pods and services\nb. Verify the domain\nc. Verify the pods\n Introduction The OIG deployment scripts demonstrate the creation of an OIG domain home on an existing Kubernetes persistent volume (PV) and persistent volume claim (PVC). The scripts also generate the domain YAML file, which can then be used to start the Kubernetes artifacts of the corresponding domain.\nPrerequisites Before you begin, perform the following steps:\n Review the Domain resource documentation. Ensure that you have executed all the preliminary steps documented in Prepare your environment. Ensure that the database is up and running. Prepare the create domain script The sample scripts for Oracle Identity Governance domain deployment are available at $WORKDIR/kubernetes/create-oim-domain.\n Make a copy of the create-domain-inputs.yaml file:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv $ cp create-domain-inputs.yaml create-domain-inputs.yaml.orig Edit the create-domain-inputs.yaml and modify the following parameters. Save the file when complete:\ndomainUID: \u0026lt;domain_uid\u0026gt; domainHome: /u01/oracle/user_projects/domains/\u0026lt;domain_uid\u0026gt; image: \u0026lt;image_name\u0026gt; imagePullSecretName: \u0026lt;container_registry_secret\u0026gt; weblogicCredentialsSecretName: \u0026lt;kubernetes_domain_secret\u0026gt; logHome: /u01/oracle/user_projects/domains/logs/\u0026lt;domain_id\u0026gt; namespace: \u0026lt;domain_namespace\u0026gt; persistentVolumeClaimName: \u0026lt;pvc_name\u0026gt; rcuSchemaPrefix: \u0026lt;rcu_prefix\u0026gt; rcuDatabaseURL: \u0026lt;rcu_db_host\u0026gt;:\u0026lt;rcu_db_port\u0026gt;/\u0026lt;rcu_db_service_name\u0026gt; rcuCredentialsSecret: \u0026lt;kubernetes_rcu_secret\u0026gt; frontEndHost: \u0026lt;front_end_hostname\u0026gt; frontEndPort: \u0026lt;front_end_port\u0026gt; Note : imagePullSecretName is not required if you are not using a container registry.\nFor example:\ndomainUID: governancedomain domainHome: /u01/oracle/user_projects/domains/governancedomain image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-220120.1359 imagePullSecretName: orclcred weblogicCredentialsSecretName: oig-domain-credentials logHome: /u01/oracle/user_projects/domains/logs/governancedomain namespace: oigns persistentVolumeClaimName: governancedomain-domain-pvc rcuSchemaPrefix: OIGK8S rcuDatabaseURL: mydatabasehost.example.com:1521/orcl.example.com rcuCredentialsSecret: oig-rcu-credentials frontEndHost: example.com frontEndPort: 14100 Note: For now frontEndHost and front_end_port should be set to example.com and 14100 respectively. These values will be changed to the correct values in post installation tasks in Set OIMFrontendURL using MBeans.\n A full list of parameters in the create-domain-inputs.yaml file are shown below:\n Parameter Definition Default adminPort Port number for the Administration Server inside the Kubernetes cluster. 
7001 adminNodePort Port number of the Administration Server outside the Kubernetes cluster. 30701 adminServerName Name of the Administration Server. AdminServer clusterName Name of the WebLogic cluster instance to generate for the domain. By default the cluster name is oimcluster for the OIG domain. oimcluster configuredManagedServerCount Number of Managed Server instances to generate for the domain. 5 createDomainFilesDir Directory on the host machine to locate all the files to create a WebLogic domain, including the script that is specified in the createDomainScriptName property. By default, this directory is set to the relative path wlst, and the create script will use the built-in WLST offline scripts in the wlst directory to create the WebLogic domain. It can also be set to the relative path wdt, and then the built-in WDT scripts will be used instead. An absolute path is also supported to point to an arbitrary directory in the file system. The built-in scripts can be replaced by the user-provided scripts or model files as long as those files are in the specified directory. Files in this directory are put into a Kubernetes config map, which in turn is mounted to the createDomainScriptsMountPath, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. wlst createDomainScriptsMountPath Mount path where the create domain scripts are located inside a pod. The create-domain.sh script creates a Kubernetes job to run the script (specified in the createDomainScriptName property) in a Kubernetes pod to create a domain home. Files in the createDomainFilesDir directory are mounted to this location in the pod, so that the Kubernetes pod can use the scripts and supporting files to create a domain home. /u01/weblogic createDomainScriptName Script that the create domain script uses to create a WebLogic domain. The create-domain.sh script creates a Kubernetes job to run this script to create a domain home. The script is located in the in-pod directory that is specified in the createDomainScriptsMountPath property. If you need to provide your own scripts to create the domain home, instead of using the built-it scripts, you must use this property to set the name of the script that you want the create domain job to run. create-domain-job.sh domainHome Home directory of the OIG domain. If not specified, the value is derived from the domainUID as /shared/domains/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/oimcluster domainPVMountPath Mount path of the domain persistent volume. /u01/oracle/user_projects/domains domainUID Unique ID that will be used to identify this particular domain. Used as the name of the generated WebLogic domain as well as the name of the Kubernetes domain resource. This ID must be unique across all domains in a Kubernetes cluster. This ID cannot contain any character that is not valid in a Kubernetes service name. oimcluster exposeAdminNodePort Boolean indicating if the Administration Server is exposed outside of the Kubernetes cluster. false exposeAdminT3Channel Boolean indicating if the T3 administrative channel is exposed outside the Kubernetes cluster. true image OIG container image. The operator requires OIG 12.2.1.4. Refer to OIG domains for details on how to obtain or create the image. oracle/oig:12.2.1.4.0 imagePullPolicy WebLogic container image pull policy. 
Legal values are IfNotPresent, Always, or Never IfNotPresent imagePullSecretName Name of the Kubernetes secret to access the container registry to pull the OIG container image. The presence of the secret will be validated when this parameter is specified. includeServerOutInPodLog Boolean indicating whether to include the server .out to the pod\u0026rsquo;s stdout. true initialManagedServerReplicas Number of Managed Servers to initially start for the domain. 2 javaOptions Java options for starting the Administration Server and Managed Servers. A Java option can have references to one or more of the following pre-defined variables to obtain WebLogic domain information: $(DOMAIN_NAME), $(DOMAIN_HOME), $(ADMIN_NAME), $(ADMIN_PORT), and $(SERVER_NAME). -Dweblogic.StdoutDebugEnabled=false logHome The in-pod location for the domain log, server logs, server out, and Node Manager log files. If not specified, the value is derived from the domainUID as /shared/logs/\u0026lt;domainUID\u0026gt;. /u01/oracle/user_projects/domains/logs/oimcluster managedServerNameBase Base string used to generate Managed Server names. oim_server managedServerPort Port number for each Managed Server. 8001 namespace Kubernetes namespace in which to create the domain. oimcluster persistentVolumeClaimName Name of the persistent volume claim created to host the domain home. If not specified, the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-sample-pvc. oimcluster-domain-pvc productionModeEnabled Boolean indicating if production mode is enabled for the domain. true serverStartPolicy Determines which WebLogic Server instances will be started. Legal values are NEVER, IF_NEEDED, ADMIN_ONLY. IF_NEEDED t3ChannelPort Port for the T3 channel of the NetworkAccessPoint. 30012 t3PublicAddress Public address for the T3 channel. This should be set to the public address of the Kubernetes cluster. This would typically be a load balancer address. For development environments only: In a single server (all-in-one) Kubernetes deployment, this may be set to the address of the master, or at the very least, it must be set to the address of one of the worker nodes. If not provided, the script will attempt to set it to the IP address of the Kubernetes cluster weblogicCredentialsSecretName Name of the Kubernetes secret for the Administration Server\u0026rsquo;s user name and password. If not specified, then the value is derived from the domainUID as \u0026lt;domainUID\u0026gt;-weblogic-credentials. oimcluster-domain-credentials weblogicImagePullSecretName Name of the Kubernetes secret for the container registry, used to pull the WebLogic Server image. serverPodCpuRequest, serverPodMemoryRequest, serverPodCpuCLimit, serverPodMemoryLimit The maximum amount of compute resources allowed, and minimum amount of compute resources required, for each server pod. Please refer to the Kubernetes documentation on Managing Compute Resources for Containers for details. Resource requests and resource limits are not specified. rcuSchemaPrefix The schema prefix to use in the database, for example OIGK8S. You may wish to make this the same as the domainUID in order to simplify matching domains to their RCU schemas. OIGK8S rcuDatabaseURL The database URL. oracle-db.default.svc.cluster.local:1521/devpdb.k8s rcuCredentialsSecret The Kubernetes secret containing the database credentials. oimcluster-rcu-credentials frontEndHost The entry point URL for the OIM. Not set frontEndPort The entry point port for the OIM. 
Not set Note that the names of the Kubernetes resources in the generated YAML files may be formed with the value of some of the properties specified in the create-inputs.yaml file. Those properties include the adminServerName, clusterName and managedServerNameBase. If those values contain any characters that are invalid in a Kubernetes service name, those characters are converted to valid values in the generated YAML files. For example, an uppercase letter is converted to a lowercase letter and an underscore (\u0026quot;_\u0026quot;) is converted to a hyphen (\u0026quot;-\u0026quot;).\nThe sample demonstrates how to create an OIG domain home and associated Kubernetes resources for a domain that has one cluster only. In addition, the sample provides the capability for users to supply their own scripts to create the domain home for other use cases. The generated domain YAML file could also be modified to cover more use cases.\nRun the create domain script Generate the create domain script Run the create domain script, specifying your inputs file and an output directory to store the generated artifacts:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv $ mkdir output $ ./create-domain.sh -i create-domain-inputs.yaml -o /\u0026lt;path to output-directory\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv $ mkdir output $ ./create-domain.sh -i create-domain-inputs.yaml -o output The output will look similar to the following:\nInput parameters being used export version=\u0026quot;create-weblogic-sample-domain-inputs-v1\u0026quot; export adminPort=\u0026quot;7001\u0026quot; export adminServerName=\u0026quot;AdminServer\u0026quot; export domainUID=\u0026quot;governancedomain\u0026quot; export domainHome=\u0026quot;/u01/oracle/user_projects/domains/governancedomain\u0026quot; export serverStartPolicy=\u0026quot;IF_NEEDED\u0026quot; export clusterName=\u0026quot;oim_cluster\u0026quot; export configuredManagedServerCount=\u0026quot;5\u0026quot; export initialManagedServerReplicas=\u0026quot;1\u0026quot; export managedServerNameBase=\u0026quot;oim_server\u0026quot; export managedServerPort=\u0026quot;14000\u0026quot; export image=\u0026quot;container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-220120.1359\u0026quot; export imagePullPolicy=\u0026quot;IfNotPresent\u0026quot; export imagePullSecretName=\u0026quot;orclcred\u0026quot; export productionModeEnabled=\u0026quot;true\u0026quot; export weblogicCredentialsSecretName=\u0026quot;oig-domain-credentials\u0026quot; export includeServerOutInPodLog=\u0026quot;true\u0026quot; export logHome=\u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain\u0026quot; export t3ChannelPort=\u0026quot;30012\u0026quot; export exposeAdminT3Channel=\u0026quot;false\u0026quot; export adminNodePort=\u0026quot;30701\u0026quot; export exposeAdminNodePort=\u0026quot;false\u0026quot; export namespace=\u0026quot;oigns\u0026quot; javaOptions=-Dweblogic.StdoutDebugEnabled=false export persistentVolumeClaimName=\u0026quot;governancedomain-domain-pvc\u0026quot; export domainPVMountPath=\u0026quot;/u01/oracle/user_projects/domains\u0026quot; export createDomainScriptsMountPath=\u0026quot;/u01/weblogic\u0026quot; export createDomainScriptName=\u0026quot;create-domain-job.sh\u0026quot; export createDomainFilesDir=\u0026quot;wlst\u0026quot; export rcuSchemaPrefix=\u0026quot;OIGK8S\u0026quot; export rcuDatabaseURL=\u0026quot;mydatabasehost.example.com:1521/orcl.example.com\u0026quot; export 
rcuCredentialsSecret=\u0026quot;oig-rcu-credentials\u0026quot; export frontEndHost=\u0026quot;example.com\u0026quot; export frontEndPort=\u0026quot;14100\u0026quot; Generating output/weblogic-domains/governancedomain/create-domain-job.yaml Generating output/weblogic-domains/governancedomain/delete-domain-job.yaml Generating output/weblogic-domains/governancedomain/domain.yaml Checking to see if the secret governancedomain-domain-credentials exists in namespace oigns configmap/governancedomain-create-fmw-infra-sample-domain-job-cm created Checking the configmap governancedomain-create-fmw-infra-sample-domain-job-cm was created configmap/governancedomain-create-fmw-infra-sample-domain-job-cm labeled Checking if object type job with name governancedomain-create-fmw-infra-sample-domain-job exists No resources found in oigns namespace. Creating the domain by creating the job output/weblogic-domains/governancedomain/create-domain-job.yaml job.batch/governancedomain-create-fmw-infra-sample-domain-job created Waiting for the job to complete... status on iteration 1 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 2 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 3 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 4 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 5 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 6 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 7 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 8 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 9 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 10 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Running status on iteration 11 of 40 pod governancedomain-create-fmw-infra-sample-domain-job-8cww8 status is Completed Domain governancedomain was created and will be started by the WebLogic Kubernetes Operator The following files were generated: output/weblogic-domains/governancedomain/create-domain-inputs.yaml output/weblogic-domains/governancedomain/create-domain-job.yaml output/weblogic-domains/governancedomain/domain.yaml sed Completed $ Note: If the create domain script creation fails, refer to the Troubleshooting section.\n Setting the OIM server memory parameters Navigate to the /output/weblogic-domains/\u0026lt;domain_uid\u0026gt; directory:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/\u0026lt;domain_uid\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain Edit the domain_oim_soa.yaml and locate the section of the file starting with: - clusterName: oim_cluster. 
Immediately after the line: topologyKey: \u0026quot;kubernetes.io/hostname\u0026quot;, add the following lines:\nenv: - name: USER_MEM_ARGS value: \u0026quot;-Djava.security.egd=file:/dev/./urandom -Xms2408m -Xmx8192m\u0026quot; The file should look as follows:\n- clusterName: oim_cluster serverService: precreateService: true serverStartState: \u0026quot;RUNNING\u0026quot; serverPod: # Instructs Kubernetes scheduler to prefer nodes for new cluster members where there are not # already members of the same cluster. affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - weight: 100 podAffinityTerm: labelSelector: matchExpressions: - key: \u0026quot;weblogic.clusterName\u0026quot; operator: In values: - $(CLUSTER_NAME) topologyKey: \u0026quot;kubernetes.io/hostname\u0026quot; env: - name: USER_MEM_ARGS value: \u0026quot;-Djava.security.egd=file:/dev/./urandom -Xms2408m -Xmx8192m\u0026quot; replicas: 1 ... Run the create domain scripts Create the Kubernetes resource using the following command:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/\u0026lt;domain_uid\u0026gt; $ kubectl apply -f domain.yaml For example:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain $ kubectl apply -f domain.yaml The output will look similar to the following:\ndomain.weblogic.oracle/governancedomain created Run the following command to view the status of the OIG pods:\n$ kubectl get pods -n oigns The output will initially look similar to the following:\nNAME READY STATUS RESTARTS AGE helper 1/1 Running 0 3h30m governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 27m governancedomain-introspect-domain-job-p4brt 1/1 Running 0 6s The introspect-domain-job pod will be displayed first. Run the command again after several minutes and check to see that the Administration Server and SOA Server are both started. When started they should have STATUS = Running and READY = 1/1.\nNAME READY STATUS RESTARTS AGE helper 1/1 Running 0 3h38m governancedomain-adminserver 1/1 Running 0 7m30s governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 35m governancedomain-soa-server1 1/1 Running 0 4m Note: It will take several minutes before all the pods listed above show. When a pod has a READY status of 0/1, the pod has started but the OIG server associated with it is still starting. 
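If you prefer to watch the pods change state rather than re-running the command manually, one option (an illustrative alternative only, not part of the generated scripts) is to use the watch flag:\n$ kubectl get pods -n oigns -w Press Ctrl+C to stop watching once the pods show READY 1/1. 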
While the pods are starting you can check the startup status in the pod logs, by running the following command:\n$ kubectl logs governancedomain-adminserver -n oigns $ kubectl logs governancedomain-soa-server1 -n oigns Once both pods are running, start the OIM Server using the following command:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain/ $ kubectl apply -f domain_oim_soa.yaml For example:\n$ cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain/ $ kubectl apply -f domain_oim_soa.yaml The output will look similar to the following:\ndomain.weblogic.oracle/governancedomain configured Verify the results Verify the domain, pods and services Verify the domain, servers pods and services are created and in the READY state with a STATUS of 1/1, by running the following command:\n$ kubectl get all,domains -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get all,domains -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/governancedomain-adminserver 1/1 Running 0 16m pod/governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 36m pod/governancedomain-oim-server1 1/1 Running 0 5m57s pod/governancedomain-soa-server1 1/1 Running 0 13m pod/helper 1/1 Running 0 3h40m NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/governancedomain-adminserver ClusterIP None \u0026lt;none\u0026gt; 7001/TCP 16m service/governancedomain-cluster-oim-cluster ClusterIP 10.97.121.159 \u0026lt;none\u0026gt; 14000/TCP 13m service/governancedomain-cluster-soa-cluster ClusterIP 10.111.231.242 \u0026lt;none\u0026gt; 8001/TCP 13m service/governancedomain-oim-server1 ClusterIP None \u0026lt;none\u0026gt; 14000/TCP 5m57s service/governancedomain-oim-server2 ClusterIP 10.108.139.30 \u0026lt;none\u0026gt; 14000/TCP 5m57s service/governancedomain-oim-server3 ClusterIP 10.97.170.104 \u0026lt;none\u0026gt; 14000/TCP 5m57s service/governancedomain-oim-server4 ClusterIP 10.99.82.214 \u0026lt;none\u0026gt; 14000/TCP 5m57s service/governancedomain-oim-server5 ClusterIP 10.98.75.228 \u0026lt;none\u0026gt; 14000/TCP 5m57s service/governancedomain-soa-server1 ClusterIP None \u0026lt;none\u0026gt; 8001/TCP 13m service/governancedomain-soa-server2 ClusterIP 10.107.232.220 \u0026lt;none\u0026gt; 8001/TCP 13m service/governancedomain-soa-server3 ClusterIP 10.108.203.6 \u0026lt;none\u0026gt; 8001/TCP 13m service/governancedomain-soa-server4 ClusterIP 10.96.178.0 \u0026lt;none\u0026gt; 8001/TCP 13m service/governancedomain-soa-server5 ClusterIP 10.107.83.62 \u0026lt;none\u0026gt; 8001/TCP 13m NAME COMPLETIONS DURATION AGE job.batch/governancedomain-create-fmw-infra-sample-domain-job 1/1 5m30s 36m NAME AGE domain.weblogic.oracle/governancedomain 17m Note: It will take several minutes before all the services listed above show. While the governancedomain-oim-server1 pod has a STATUS of 0/1 the pod is started but the OIG server associated with it is currently starting. While the pod is starting you can check the startup status in the pod logs, by running the following command:\n$ kubectl logs governancedomain-oim-server1 -n oigns The default domain created by the script has the following characteristics:\n An Administration Server named AdminServer listening on port 7001. A configured OIG cluster named oig_cluster of size 5. A configured SOA cluster named soa_cluster of size 5. One started OIG managed Server, named oim_server1, listening on port 14000. 
One started SOA managed Server, named soa_server1, listening on port 8001. Log files that are located in \u0026lt;persistent_volume\u0026gt;/logs/\u0026lt;domainUID\u0026gt; Verify the domain Run the following command to describe the domain:\n$ kubectl describe domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe domain governancedomain -n oigns The output will look similar to the following:\nName: governancedomain Namespace: oigns Labels: weblogic.domainUID=governancedomain Annotations: \u0026lt;none\u0026gt; API Version: weblogic.oracle/v8 Kind: Domain Metadata: Creation Timestamp: 2022-03-10T11:44:17Z Generation: 2 Managed Fields: API Version: weblogic.oracle/v8 Fields Type: FieldsV1 fieldsV1: f:metadata: f:annotations: .: f:kubectl.kubernetes.io/last-applied-configuration: f:labels: .: f:weblogic.domainUID: Manager: kubectl-client-side-apply Operation: Update Time: 2022-03-10T14:59:44Z API Version: weblogic.oracle/v8 Fields Type: FieldsV1 fieldsV1: f:status: .: f:clusters: f:conditions: f:introspectJobFailureCount: f:servers: f:startTime: Manager: Kubernetes Java Client Operation: Update Time: 2022-03-10T11:51:12Z Resource Version: 383381 UID: ea95c549-c414-42a6-8de4-beaf1204872e Spec: Admin Server: Server Pod: Env: Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms512m -Xmx1024m Server Start State: RUNNING Clusters: Cluster Name: soa_cluster Replicas: 1 Server Pod: Affinity: Pod Anti Affinity: Preferred During Scheduling Ignored During Execution: Pod Affinity Term: Label Selector: Match Expressions: Key: weblogic.clusterName Operator: In Values: $(CLUSTER_NAME) Topology Key: kubernetes.io/hostname Weight: 100 Server Service: Precreate Service: true Server Start State: RUNNING Cluster Name: oim_cluster Replicas: 1 Server Pod: Affinity: Pod Anti Affinity: Preferred During Scheduling Ignored During Execution: Pod Affinity Term: Label Selector: Match Expressions: Key: weblogic.clusterName Operator: In Values: $(CLUSTER_NAME) Topology Key: kubernetes.io/hostname Weight: 100 Env: Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms2408m -Xmx8192m Server Service: Precreate Service: true Server Start State: RUNNING Data Home: Domain Home: /u01/oracle/user_projects/domains/governancedomain Domain Home Source Type: PersistentVolume Http Access Log In Log Home: true Image: container-registry.oracle.com/middleware/oig_cpu:12.2.1.4-jdk8-ol7-220120.1359 Image Pull Policy: IfNotPresent Image Pull Secrets: Name: orclcred Include Server Out In Pod Log: true Log Home: /u01/oracle/user_projects/domains/logs/governancedomain Log Home Enabled: true Server Pod: Env: Name: JAVA_OPTIONS Value: -Dweblogic.StdoutDebugEnabled=false Name: USER_MEM_ARGS Value: -Djava.security.egd=file:/dev/./urandom -Xms256m -Xmx1024m Volume Mounts: Mount Path: /u01/oracle/user_projects/domains Name: weblogic-domain-storage-volume Volumes: Name: weblogic-domain-storage-volume Persistent Volume Claim: Claim Name: governancedomain-domain-pvc Server Start Policy: IF_NEEDED Web Logic Credentials Secret: Name: oig-domain-credentials Status: Clusters: Cluster Name: oim_cluster Maximum Replicas: 5 Minimum Replicas: 0 Ready Replicas: 1 Replicas: 1 Replicas Goal: 1 Cluster Name: soa_cluster Maximum Replicas: 5 Minimum Replicas: 0 Ready Replicas: 1 Replicas: 1 Replicas Goal: 1 Conditions: Last Transition Time: 2022-03-10T11:59:53.249700Z Reason: ServersReady Status: True Type: Available Introspect Job Failure Count: 0 Servers: Desired State: RUNNING 
Health: Activation Time: 2022-03-10T11:46:49.874000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.40.59 Server Name: AdminServer State: RUNNING Cluster Name: oim_cluster Desired State: RUNNING Health: Activation Time: 2022-03-10T15:06:21.693000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.40.59 Server Name: oim_server1 State: RUNNING Cluster Name: oim_cluster Desired State: SHUTDOWN Server Name: oim_server2 Cluster Name: oim_cluster Desired State: SHUTDOWN Server Name: oim_server3 Cluster Name: oim_cluster Desired State: SHUTDOWN Server Name: oim_server4 Cluster Name: oim_cluster Desired State: SHUTDOWN Server Name: oim_server5 Cluster Name: soa_cluster Desired State: RUNNING Health: Activation Time: 2022-03-10T11:49:26.340000Z Overall Health: ok Subsystems: Subsystem Name: ServerRuntime Symptoms: Node Name: 10.250.40.59 Server Name: soa_server1 State: RUNNING Cluster Name: soa_cluster Desired State: SHUTDOWN Server Name: soa_server2 Cluster Name: soa_cluster Desired State: SHUTDOWN Server Name: soa_server3 Cluster Name: soa_cluster Desired State: SHUTDOWN Server Name: soa_server4 Cluster Name: soa_cluster Desired State: SHUTDOWN Server Name: soa_server5 Start Time: 2022-03-10T14:50:19.148541Z Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal DomainCreated 19m weblogic.operator Domain resource governancedomain was created Normal DomainProcessingCompleted 12m weblogic.operator Successfully completed processing domain resource governancedomain Normal DomainChanged 10m weblogic.operator Domain resource governancedomain was changed Normal DomainProcessingStarting 10m (x2 over 19m) weblogic.operator Creating or updating Kubernetes presence for WebLogic Domain with UID governancedomai In the Status section of the output, the available servers and clusters are listed.\n Verify the pods Run the following command to see the pods running the servers and which nodes they are running on:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; -o wide For example:\n$ kubectl get pods -n oigns -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES helper 1/1 Running 0 3h50m 10.244.1.39 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-adminserver 1/1 Running 0 27m 10.244.1.42 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 47m 10.244.1.40 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-oim-server1 1/1 Running 0 16m 10.244.1.44 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; governancedomain-soa-server1 1/1 Running 0 24m 10.244.1.43 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; You are now ready to configure an Ingress to direct traffic for your OIG domain as per Configure an ingress for an OIG domain.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oid/create-oid-instances/", + "title": "Create Oracle Internet Directory Instances", + "tags": [], + "description": "This document provides details of the oid Helm chart.", + "content": " Introduction Create a Kubernetes namespace Create a Kubernetes secret for the container registry Create a persistent volume directory The oid Helm chart Create OID instances Helm command output Verify the OID deployment Undeploy an OID deployment Appendix: Configuration parameters Introduction This chapter demonstrates how to deploy Oracle 
Internet Directory (OID) 12c instance(s) using the Helm package manager for Kubernetes.\nBased on the configuration, this chart deploys the following objects in the specified namespace of a Kubernetes cluster.\n Service Account Secret Persistent Volume and Persistent Volume Claim Pod(s)/Container(s) for Oracle Internet Directory Instances Services for interfaces exposed through Oracle Internet Directory Instances Ingress configuration Create a Kubernetes namespace Create a Kubernetes namespace for the OID deployment by running the following command:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace oidns The output will look similar to the following:\nnamespace/oidns created Create a Kubernetes secret for the container registry In this section you create a secret that stores the credentials for the container registry where the OID image is stored. This step must be followed if using Oracle Container Registry or your own private registry. If you are not using a container registry and have loaded the images on each of the master and worker nodes, you can skip this step.\n Run the following command to create the secret:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=\u0026lt;CONTAINER_REGISTRY\u0026gt; \\ --docker-username=\u0026#34;\u0026lt;USER_NAME\u0026gt;\u0026#34; \\ --docker-password=\u0026lt;PASSWORD\u0026gt; --docker-email=\u0026lt;EMAIL_ID\u0026gt; \\ --namespace=\u0026lt;namespace\u0026gt; For example, if using Oracle Container Registry:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=container-registry.oracle.com \\ --docker-username=\u0026#34;user@example.com\u0026#34; \\ --docker-password=password --docker-email=user@example.com \\ --namespace=oidns Replace \u0026lt;USER_NAME\u0026gt; and \u0026lt;PASSWORD\u0026gt; with the credentials for the registry with the following caveats:\n If using Oracle Container Registry to pull the OID container image, this is the username and password used to log in to Oracle Container Registry. Before you can use this image you must log in to Oracle Container Registry, navigate to Middleware \u0026gt; oid_cpu and accept the license agreement.\n If using your own container registry to store the OID container image, this is the username and password (or token) for your container registry.\n The output will look similar to the following:\nsecret/orclcred created Create a persistent volume directory As referenced in Prerequisites, the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.\nMake sure the persistent volume path has full access permissions, and that the folder is empty. 
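For example, a quick way to confirm the permissions and confirm the directory is empty (an illustrative check only, assuming the share is already mounted on the node; \u0026lt;persistent_volume\u0026gt; is whatever path your environment uses) is:\n$ ls -ld \u0026lt;persistent_volume\u0026gt; $ ls -A \u0026lt;persistent_volume\u0026gt; 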
In this example /scratch/shared/ is a shared directory accessible from all nodes.\n On the master node run the following command to create a user_projects directory:\n$ cd \u0026lt;persistent_volume\u0026gt; $ mkdir oid_user_projects $ chmod 777 oid_user_projects For example:\n$ cd /scratch/shared $ mkdir oid_user_projects $ chmod 777 oid_user_projects On the master node run the following to ensure it is possible to read and write to the persistent volume:\n$ cd \u0026lt;persistent_volume\u0026gt;/oid_user_projects $ touch filemaster.txt $ ls filemaster.txt For example:\n$ cd /scratch/shared/oid_user_projects $ touch filemaster.txt $ ls filemaster.txt On the first worker node run the following to ensure it is possible to read and write to the persistent volume:\n$ cd /scratch/shared/oid_user_projects $ ls filemaster.txt $ touch fileworker1.txt $ ls fileworker1.txt Repeat the above for any other worker nodes, e.g. fileworker2.txt etc. Once you have proven that it\u0026rsquo;s possible to read and write from each node to the persistent volume, delete the files created.\n The oid Helm chart The \u0026lsquo;oid\u0026rsquo; Helm chart allows you to create Oracle Internet Directory instances along with the required Kubernetes objects in a specified namespace.\nThe deployment can be initiated by running the following Helm command with reference to the oid Helm chart, along with configuration parameters according to your environment.\n$ cd $WORKDIR/kubernetes/helm $ helm install --namespace \u0026lt;namespace\u0026gt; \\ \u0026lt;Configuration Parameters\u0026gt; \\ \u0026lt;deployment/release name\u0026gt; \\ \u0026lt;Helm Chart Path/Name\u0026gt; Configuration Parameters (override values in chart) can be passed on with --set arguments on the command line and/or with -f / --values arguments when referring to files.\nNote: The examples in Create OID instances below provide values which allow the user to override the default values provided by the Helm chart. A full list of configuration parameters and their default values is shown in Appendix: Configuration parameters.\nFor more details about the helm command and parameters, please execute helm --help and helm install --help.\nCreate OID instances You can create OID instances using one of the following methods:\n Using a YAML file Using --set argument Note: It is not possible to install sample data or load an ldif file during the OID deployment. In order to load data into OID, create the OID deployment and then use ldapmodify once the ingress has been deployed. 
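For example, once the ingress is in place, an entry could be loaded with a command along the following lines (an illustrative sketch only; the hostname, port, and ldif file are placeholders for your environment and the exact options depend on the LDAP client you use):\n$ ldapmodify -h \u0026lt;ingress_host\u0026gt; -p \u0026lt;ldap_port\u0026gt; -D cn=orcladmin -w \u0026lt;password\u0026gt; -a -f \u0026lt;data\u0026gt;.ldif 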
See Using LDAP utilities.\nUsing a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create an oid-values-override.yaml as follows:\nimage: repository: \u0026lt;image\u0026gt; tag: \u0026lt;tag\u0026gt; pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oidConfig: realmDN: \u0026lt;baseDN\u0026gt; domainName: \u0026lt;domainName\u0026gt; orcladminPassword: \u0026lt;password\u0026gt; dbUser: sys dbPassword: \u0026lt;sys_password\u0026gt; dbschemaPassword: \u0026lt;password\u0026gt; rcuSchemaPrefix: \u0026lt;rcu_prefix\u0026gt; rcuDatabaseURL: \u0026lt;db_hostname\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;db_service\u0026gt; sslwalletPassword: \u0026lt;password\u0026gt; persistence: type: filesystem filesystem: hostPath: path: \u0026lt;persistent_volume\u0026gt;/oid_user_projects odsm: adminUser: weblogic adminPassword: \u0026lt;password\u0026gt; For example:\nimage: repository: container-registry.oracle.com/middleware/oid_cpu tag: 12.2.1.4-jdk8-ol7-220223.1744 pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oidConfig: realmDN: dc=oid,dc=example,dc=com domainName: oid_domain orcladminPassword: \u0026lt;password\u0026gt; dbUser: sys dbPassword: \u0026lt;password\u0026gt; dbschemaPassword: \u0026lt;password\u0026gt; rcuSchemaPrefix: OIDK8S rcuDatabaseURL: oiddb.example.com:1521/oiddb.example.com sslwalletPassword: \u0026lt;password\u0026gt; persistence: type: filesystem filesystem: hostPath: path: /scratch/shared/oid_user_projects odsm: adminUser: weblogic adminPassword: \u0026lt;password\u0026gt; The following caveats exist:\n \u0026lt;baseDN\u0026gt; should be set to the value for the base DN to be created.\n \u0026lt;domainName\u0026gt; should be set to the value for the domain name to be created.\n rcuDatabaseURL, dbUser and dbPassword should be set to the relevant values for the database created as per Prerequisites.\n rcuSchemaPrefix and dbschemaPassword should be set to a value of your choice. 
This creates the OID schema in the database.\n Replace \u0026lt;password\u0026gt; with the relevant passwords.\n If you are not using Oracle Container Registry or your own container registry for your OID container image, then you can remove the following:\nimagePullSecrets: - name: orclcred If using NFS for your persistent volume then change the persistence section as follows:\npersistence: type: networkstorage networkstorage: nfs: path: \u0026lt;persistent_volume\u0026gt;/oid_user_projects server: \u0026lt;NFS IP address\u0026gt; Run the following to create the OID instances:\n$ helm install --namespace \u0026lt;namespace\u0026gt; --values oid-values-override.yaml \u0026lt;release name\u0026gt; oid For example:\n$ helm install --namespace oidns --values oid-values-override.yaml oid oid Check the OID deployment as per Verify the OID deployment.\n Using --set argument Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Run the following command to create the OID instances:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --set oidConfig.realmDN=\u0026#34;\u0026lt;baseDN\u0026gt;\u0026#34;,oidConfig.domainName=\u0026lt;domainName\u0026gt;,oidConfig.orcladminPassword=\u0026lt;password\u0026gt; \\ --set oidConfig.dbUser=sys,oidConfig.dbPassword=\u0026lt;password\u0026gt;,oidConfig.dbschemaPassword=\u0026lt;password\u0026gt; \\ --set oidConfig.rcuSchemaPrefix=\u0026#34;\u0026lt;rcu_prefix\u0026gt;\u0026#34;,oidConfig.rcuDatabaseURL=\u0026#34;\u0026lt;db_hostname\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;db_service\u0026gt;\u0026#34;,oidConfig.sslwalletPassword=\u0026lt;password\u0026gt; \\ --set persistence.filesystem.hostPath.path=\u0026lt;persistent_volume\u0026gt;/oid_user_projects \\ --set image.repository=\u0026lt;image_location\u0026gt;,image.tag=\u0026lt;image_tag\u0026gt; \\ --set odsm.adminUser=weblogic,odsm.adminPassword=\u0026lt;password\u0026gt; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ \u0026lt;release name\u0026gt; oid For example:\n$ helm install --namespace oidns \\ --set oidConfig.realmDN=\u0026#34;dc=oid,dc=example,dc=com\u0026#34;,oidConfig.domainName=oid_domain,oidConfig.orcladminPassword=\u0026lt;password\u0026gt; \\ --set oidConfig.dbUser=sys,oidConfig.dbPassword=\u0026lt;password\u0026gt;,oidConfig.dbschemaPassword=\u0026lt;password\u0026gt; \\ --set oidConfig.rcuSchemaPrefix=\u0026#34;OIDK8S\u0026#34;,oidConfig.rcuDatabaseURL=\u0026#34;oiddb.example.com:1521/oiddb.example.com\u0026#34;,oidConfig.sslwalletPassword=\u0026lt;password\u0026gt; \\ --set persistence.filesystem.hostPath.path=/scratch/shared/oid_user_projects \\ --set image.repository=container-registry.oracle.com/middleware/oid_cpu,image.tag=12.2.1.4-jdk8-ol7-220223.1744 \\ --set odsm.adminUser=weblogic,odsm.adminPassword=\u0026lt;password\u0026gt; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ oid oid The following caveats exist:\n \u0026lt;baseDN\u0026gt; should be set to the value for the base DN to be created. \u0026lt;domainName\u0026gt; should be set to the value for the domain name to be created. rcuDatabaseURL, dbUser and dbPassword should be set to the relevant values for the database created as per Prerequisites. rcuSchemaPrefix and dbschemaPassword should be set to a value of your choice. This creates the OID schema in the database. Replace \u0026lt;password\u0026gt; with the relevant passwords. 
If you are not using Oracle Container Registry or your own container registry for your OID container image, then you can remove the following: --set imagePullSecrets[0].name=\u0026quot;orclcred\u0026quot; If using NFS for your persistent volume then use persistence.networkstorage.nfs.path=\u0026lt;persistent_volume\u0026gt;/oid_user_projects,persistence.networkstorage.nfs.server=\u0026lt;NFS IP address\u0026gt; instead of persistence.filesystem.hostPath.path. Check the OID deployment as per Verify the OID deployment.\n Helm command output In all the examples above, the following output is shown following a successful execution of the helm install command.\nNAME: oid LAST DEPLOYED: Fri Mar 25 09:43:25 2022 NAMESPACE: oidns STATUS: deployed REVISION: 1 TEST SUITE: None Verify the OID deployment Run the following command to verify the OID deployment:\nCommand:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get all For example:\n$ kubectl --namespace oidns get all The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/oidhost1 1/1 Running 0 35m pod/oidhost2 1/1 Running 0 35m NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/oid-lbr-ldap ClusterIP 10.110.118.113 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 35m service/oidhost1 ClusterIP 10.97.17.125 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP,7001/TCP,7002/TCP 35m service/oidhost2 ClusterIP 10.106.32.187 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 35m Note: If the OID deployment fails, refer to Troubleshooting for instructions on how to view pod logs or describe the pod. Once the problem is identified, follow Undeploy an OID deployment to clean down the deployment before deploying again.\nKubernetes Objects Kubernetes objects created by the Helm chart are detailed in the table below:\n Type Name Example Name Purpose Secret \u0026lt;deployment/release name\u0026gt;-creds oid-creds Secret object for Oracle Internet Directory related critical values like passwords Persistent Volume \u0026lt;deployment/release name\u0026gt;-pv oid-pv Persistent Volume for user_projects mount. Persistent Volume Claim \u0026lt;deployment/release name\u0026gt;-pvc oid-pvc Persistent Volume Claim for user_projects mount. Pod \u0026lt;deployment/release name\u0026gt;1 oidhost1 Pod/Container for base Oracle Internet Directory Instance which would be populated first with base configuration (like number of sample entries) Pod \u0026lt;deployment/release name\u0026gt;N oidhost2, oidhost3, \u0026hellip; Pod(s)/Container(s) for Oracle Internet Directory Instances Service \u0026lt;deployment/release name\u0026gt;lbr-ldap oid-lbr-ldap Service for LDAP/LDAPS access load balanced across the base Oracle Internet Directory instances Service \u0026lt;deployment/release name\u0026gt; oidhost1, oidhost2, oidhost3, \u0026hellip; Service for LDAP/LDAPS access for each base Oracle Internet Directory instance Ingress \u0026lt;deployment/release name\u0026gt;-ingress-nginx oid-ingress-nginx Ingress Rules for LDAP/LDAPS access. In the table above, the \u0026lsquo;Example Name\u0026rsquo; for each object is based on the value \u0026lsquo;oid\u0026rsquo; as deployment/release name for the Helm chart installation. Ingress Configuration With OID instance(s) now deployed, you are ready to configure an ingress controller to direct traffic to OID as per Configure an ingress for an OID.\nUndeploy an OID deployment Remove OID schemas from the database Note: These steps must be performed if cleaning down a failed install. 
Failure to do so will cause any new OID deployment to fail.\nTo remove the OID schemas from the database:\n Run the following to enter a bash session in an oid pod:\n$ kubectl exec -ti \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; -- bash For example:\n$ kubectl exec -ti oidhost2 -n oidns -- bash This will take you into a bash session in the pod:\n[oracle@oidhost2 oracle]$ Inside the container drop the RCU schemas as follows:\n[oracle@oidhost2 oracle]$ export CONNECTION_STRING=\u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt; [oracle@oidhost2 oracle]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; [oracle@oidhost2 oracle]$ export DB_USER=sys [oracle@oidhost2 oracle]$ echo -e \u0026lt;db_pwd\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;rcu_schema_pwd\u0026gt; \u0026gt; /tmp/pwd.txt ${ORACLE_HOME}/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString ${CONNECTION_STRING} \\ -dbUser ${DB_USER} -dbRole sysdba -selectDependentsForComponents true -schemaPrefix ${RCUPREFIX} \\ -component MDS -component OPSS -component STB -component OID -component IAU -component WLS -f \u0026lt; /tmp/pwd.txt where:\n \u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt; is your database connect string \u0026lt;rcu_schema_prefix\u0026gt; is the RCU schema prefix \u0026lt;db_pwd\u0026gt; is the SYS password for the database \u0026lt;rcu_schema_pwd\u0026gt; is the password for the \u0026lt;rcu_schema_prefix\u0026gt; For example:\n[oracle@oidhost2 oracle]$ export CONNECTION_STRING=oiddb.example.com:1521/oiddb.example.com [oracle@oidhost2 oracle]$ export RCUPREFIX=OIDK8S [oracle@oidhost2 oracle]$ export DB_USER=sys [oracle@oidhost2 oracle]$ echo -e \u0026lt;password\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;password\u0026gt; \u0026gt; /tmp/pwd.txt ${ORACLE_HOME}/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString ${CONNECTION_STRING} \\ -dbUser ${DB_USER} -dbRole sysdba -selectDependentsForComponents true -schemaPrefix ${RCUPREFIX} \\ -component MDS -component OPSS -component STB -component OID -component IAU -component WLS -f \u0026lt; /tmp/pwd.txt The output will look similar to the following:\nRCU Logfile: /tmp/RCU2022-03-28_10-08_535715154/logs/rcu.log Processing command line .... Repository Creation Utility - Checking Prerequisites Checking Global Prerequisites Repository Creation Utility - Checking Prerequisites Checking Component Prerequisites Repository Creation Utility - Drop Repository Drop in progress. Percent Complete: 2 Percent Complete: 13 Percent Complete: 15 Dropping Audit Services(IAU) Percent Complete: 23 Percent Complete: 29 Percent Complete: 44 Percent Complete: 45 Dropping Oracle Internet Directory(OID) Percent Complete: 46 etc.. etc.. 
Dropping Audit Services Viewer(IAU_VIEWER) Dropping Audit Services Append(IAU_APPEND) Dropping Common Infrastructure Services(STB) Dropping tablespaces in the repository database Repository Creation Utility: Drop - Completion Summary Database details: ----------------------------- Host Name : oiddb.example.com Port : 1521 Service Name : oiddb.example.com Connected As : sys Prefix for (prefixable) Schema Owners : OIDK8S Prefix for (non-prefixable) Schema Owners : DEFAULT_PREFIX RCU Logfile : /tmp/RCU2022-03-28_10-08_535715154/logs/rcu.log Component schemas dropped: ----------------------------- Component Status Logfile Common Infrastructure Services Success /tmp/RCU2022-03-28_10-08_535715154/logs/stb.log Oracle Platform Security Services Success /tmp/RCU2022-03-28_10-08_535715154/logs/opss.log Oracle Internet Directory Success /tmp/RCU2022-03-28_10-08_535715154/logs/oid.log Audit Services Success /tmp/RCU2022-03-28_10-08_535715154/logs/iau.log Audit Services Append Success /tmp/RCU2022-03-28_10-08_535715154/logs/iau_append.log Audit Services Viewer Success /tmp/RCU2022-03-28_10-08_535715154/logs/iau_viewer.log Metadata Services Success /tmp/RCU2022-03-28_10-08_535715154/logs/mds.log WebLogic Services Success /tmp/RCU2022-03-28_10-08_535715154/logs/wls.log Repository Creation Utility - Drop : Operation Completed Delete the OID deployment Find the deployment release name:\n$ helm --namespace \u0026lt;namespace\u0026gt; list For example:\n$ helm --namespace oidns list The output will look similar to the following:\nNAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION oid oidns 2 2022-03-21 16:46:34.05531056 +0000 UTC deployed oid-0.1 12.2.1.4.0 Delete the deployment using the following command:\n$ helm uninstall --namespace \u0026lt;namespace\u0026gt; \u0026lt;release\u0026gt; For example:\n$ helm uninstall --namespace oidns oid The output will look similar to the following:\nrelease \u0026quot;oid\u0026quot; uninstalled Delete the persistent volume contents Delete the contents of the oid_user_projects directory in the persistent volume:\n$ cd \u0026lt;persistent_volume\u0026gt;/oid_user_projects $ rm -rf * For example:\n$ cd /scratch/shared/oid_user_projects $ rm -rf * Appendix: Configuration Parameters The following table lists the configurable parameters of the oid chart and their default values.\n Parameter Description Default Value replicaCount Number of base Oracle Internet Directory instances/pods/services to be created. 1 restartPolicyName restartPolicy to be configured for each POD containing Oracle Internet Directory instance OnFailure image.repository Oracle Internet Directory Image Registry/Repository and name. Based on this, the image parameter will be configured for Oracle Internet Directory pods/containers oracle/oid image.tag Oracle Internet Directory Image Tag. 
Based on this, the image parameter will be configured for Oracle Internet Directory pods/containers 12.2.1.4.0 image.pullPolicy policy to pull the image IfnotPresent imagePullSecrets.name name of Secret resource containing private registry credentials regcred nameOverride override the fullname with this name fullnameOverride Overrides the fullname with the provided string serviceAccount.create Specifies whether a service account should be created true serviceAccount.name If not set and create is true, a name is generated using the fullname template oid-\u0026lt; fullname \u0026gt;-token-\u0026lt; randomalphanum \u0026gt; podSecurityContext Security context policies to add to the controller pod securityContext Security context policies to add by default service.type Type of Service to be created for OID Interfaces (like LDAP, HTTP, Admin) ClusterIP service.lbrtype Service Type for loadbalancer services exposing LDAP, HTTP interfaces from available/accessible OID pods ClusterIP ingress.enabled true ingress.nginx.http.host Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as \u0026lt; fullname \u0026gt;-http.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-0.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.http.domain Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as \u0026lt; host \u0026gt;.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-0.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.http.backendPort http ingress.nginx.http.nginxAnnotations { kubernetes.io/ingress.class: “nginx\u0026rdquo; } ingress.nginx.admin.host Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as \u0026lt; fullname \u0026gt;-admin.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-admin-0.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-admin-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.admin.domain Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as \u0026lt; host \u0026gt;.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-0.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.admin.nginxAnnotations { kubernetes.io/ingress.class: “nginx” nginx.ingress.kubernetes.io/backend-protocol: “https\u0026rdquo;} ingress.ingress.tlsSecret Secret name to use an already created TLS Secret. If such secret is not provided, one would be created with name \u0026lt; fullname \u0026gt;-tls-cert. If the TLS Secret is in different namespace, name can be mentioned as \u0026lt; namespace \u0026gt;/\u0026lt; tlsSecretName \u0026gt; ingress.certCN Subject’s common name (cn) for SelfSigned Cert \u0026lt; fullname \u0026gt; ingress.certValidityDays Validity of Self-Signed Cert in days 365 nodeSelector node labels for pod assignment tolerations node taints to tolerate affinity node/pod affinities persistence.enabled If enabled, it will use the persistent volume. 
if value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume true persistence.pvname pvname to use an already created Persistent Volume , If blank will use the default name oid-\u0026lt; fullname \u0026gt;-pv persistence.pvcname pvcname to use an already created Persistent Volume Claim , If blank will use default name oid-\u0026lt; fullname \u0026gt;-pvc persistence.type supported values: either filesystem or networkstorage or custom filesystem persistence.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user /scratch/shared/oid_user_projects persistence.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oid_user_projects persistence.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 persistence.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object persistence.accessMode Specifies the access mode of the location provided ReadWriteMany persistence.size Specifies the size of the storage 20Gi persistence.storageClass Specifies the storageclass of the persistence volume. manual persistence.annotations specifies any annotations that will be used { } secret.enabled If enabled it will use the secret created with base64 encoding. if value is false, secret would not be used and input values (through –set, –values, etc.) would be used while creation of pods. true secret.name secret name to use an already created Secret oid-\u0026lt; fullname \u0026gt;-creds secret.type Specifies the type of the secret opaque oidPorts.ldap Port on which Oracle Internet Directory Instance in the container should listen for LDAP Communication. 3060 oidPorts.ldaps Port on which Oracle Internet Directory Instance in the container should listen for LDAPS Communication. oidConfig.realmDN BaseDN for OID Instances oidConfig.domainName WebLogic Domain Name oid_domain oidConfig.domainHome WebLogic Domain Home /u01/oracle/user_projects/domains/oid_domain oidConfig.orcladminPassword Password for orcladmin user. Value will be added to Secret and Pod(s) will use the Secret oidConfig.dbUser Value for login into db usually sys. Value would be added to Secret and Pod(s) would be using the Secret oidConfig.dbPassword dbPassword is the SYS password for the database. Value would be added to Secret and Pod(s) would be using the Secret oidConfig.dbschemaPassword Password for DB Schema(s) to be created by RCU. Value would be added to Secret and Pod(s) would be using the Secret oidConfig.rcuSchemaPrefix The schema prefix to use in the database, for example OIDPD. oidConfig.rcuDatabaseURL The database URL. Sample: \u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt; oidConfig.sleepBeforeConfig Based on the value for this parameter, initialization/configuration of each OID additional server (oid)n would be delayed and readiness probes would be configured. This is to make sure that OID additional servers (oid)n are initialized in sequence. 600 oidConfig.sslwalletPassword SSL enabled password to be used for ORAPKI deploymentConfig.startupTime Based on the value for this parameter, initialization/configuration of each OID additional servers (oid)n will be delayed and readiness probes would be configured. 
initialDelaySeconds would be configured as sleepBeforeConfig + startupTime 480 deploymentConfig.livenessProbeInitialDelay Parameter to decide livenessProbe initialDelaySeconds 900 baseOID Configuration for Base OID instance (oid1) baseOID.envVarsConfigMap Reference to ConfigMap which can contain additional environment variables to be passed on to POD for Base OID Instance baseOID.envVars Environment variables in Yaml Map format. This is helpful when it is required to pass environment variables through \u0026ndash;values file. List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap additionalOID Configuration for additional OID instances (oidN) additionalOID.envVarsConfigMap Reference to ConfigMap which can contain additional environment variables to be passed on to POD for additional OID Instance additionalOID.envVars Environment variables in Yaml Map format. This is helpful when it is required to pass environment variables through \u0026ndash;values file. List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap odsm Parameters/Configurations for ODSM Deployment odsm.adminUser Oracle WebLogic Server Administration User odsm.adminPassword Password for Oracle WebLogic Server Administration User odsm.startupTime Expected startup time. After specified seconds readinessProbe will start 900 odsmPorts Configuration for ODSM Ports odsmPorts.http ODSM HTTP Port 7001 odsmPorts.https ODSM HTTPS Port 7002 " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oud/create-oud-instances/", + "title": "Create Oracle Unified Directory Instances", + "tags": [], + "description": "This document provides details of the oud-ds-rs Helm chart.", + "content": " Introduction Create a Kubernetes namespace Create a Kubernetes secret for the container registry Create a Kubernetes secret for cronjob images The oud-ds-rs Helm chart Create OUD instances Helm command output Verify the OUD deployment Verify the OUD replication Verify the cronjob Undeploy an OUD deployment Appendix: Configuration parameters Introduction This chapter demonstrates how to deploy Oracle Unified Directory (OUD) 12c instance(s) and replicated instances using the Helm package manager for Kubernetes.\nThe helm chart can be used to deploy an Oracle Unified Directory instance as a base, with configured sample entries, and multiple replicated Oracle Unified Directory instances/pods/services based on the specified replicaCount.\nBased on the configuration, this chart deploys the following objects in the specified namespace of a Kubernetes cluster.\n Service Account Secret Persistent Volume and Persistent Volume Claim Pod(s)/Container(s) for Oracle Unified Directory Instances Services for interfaces exposed through Oracle Unified Directory Instances Ingress configuration Create a Kubernetes namespace Create a Kubernetes namespace for the OUD deployment by running the following command:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace oudns The output will look similar to the following:\nnamespace/oudns created Create a Kubernetes secret for the container registry Create a Kubernetes secret to store the credentials for the container registry where the OUD image is stored. This step must be followed if using Oracle Container Registry or your own private container registry.
If you are not using a container registry and have loaded the images on each of the master and worker nodes, you can skip this step.\n Run the following command to create the secret:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=\u0026lt;CONTAINER_REGISTRY\u0026gt; \\ --docker-username=\u0026#34;\u0026lt;USER_NAME\u0026gt;\u0026#34; \\ --docker-password=\u0026lt;PASSWORD\u0026gt; --docker-email=\u0026lt;EMAIL_ID\u0026gt; \\ --namespace=\u0026lt;domain_namespace\u0026gt; For example, if using Oracle Container Registry:\n$ kubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=container-registry.oracle.com \\ --docker-username=\u0026#34;user@example.com\u0026#34; \\ --docker-password=password --docker-email=user@example.com \\ --namespace=oudns Replace \u0026lt;USER_NAME\u0026gt; and \u0026lt;PASSWORD\u0026gt; with the credentials for the registry with the following caveats:\n If using Oracle Container Registry to pull the OUD container image, this is the username and password used to login to Oracle Container Registry. Before you can use this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oud_cpu and accept the license agreement.\n If using your own container registry to store the OUD container image, this is the username and password (or token) for your container registry.\n The output will look similar to the following:\nsecret/orclcred created Create a Kubernetes secret for cronjob images Once OUD is deployed, if the Kubernetes node where the OUD pod(s) is/are running goes down after the pod eviction time-out, the pod(s) don\u0026rsquo;t get evicted but move to a Terminating state. The pod(s) will then remain in that state forever. To avoid this problem a cron-job is created during OUD deployment that checks for any pods in Terminating state, deletes them, and then starts the pod again. This cron job requires access to images on hub.docker.com. 
A Kubernetes secret must therefore be created to enable access to these images.\n Create a Kubernetes secret to access the required images on hub.docker.com:\nNote: You must first have a user account on hub.docker.com:\n$ kubectl create secret docker-registry \u0026#34;dockercred\u0026#34; --docker-server=\u0026#34;https://index.docker.io/v1/\u0026#34; --docker-username=\u0026#34;\u0026lt;docker_username\u0026gt;\u0026#34; --docker-password=\u0026lt;password\u0026gt; --docker-email=\u0026lt;docker_email_credentials\u0026gt; --namespace=\u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create secret docker-registry \u0026quot;dockercred\u0026quot; --docker-server=\u0026quot;https://index.docker.io/v1/\u0026quot; --docker-username=\u0026quot;username\u0026quot; --docker-password=\u0026lt;password\u0026gt; --docker-email=user@example.com --namespace=oudns The output will look similar to the following:\nsecret/dockercred created The oud-ds-rs Helm chart The oud-ds-rs Helm chart allows you to create or deploy a group of replicated Oracle Unified Directory instances along with Kubernetes objects in a specified namespace.\nThe deployment can be initiated by running the following Helm command with reference to the oud-ds-rs Helm chart, along with configuration parameters according to your environment.\n$ cd $WORKDIR/kubernetes/helm $ helm install --namespace \u0026lt;namespace\u0026gt; \\ \u0026lt;Configuration Parameters\u0026gt; \\ \u0026lt;deployment/release name\u0026gt; \\ \u0026lt;Helm Chart Path/Name\u0026gt; Configuration Parameters (override values in chart) can be passed on with --set arguments on the command line and/or with -f / --values arguments when referring to files.\nNote: The examples in Create OUD instances below provide values which allow the user to override the default values provided by the Helm chart. A full list of configuration parameters and their default values is shown in Appendix: Configuration parameters.\nFor more details about the helm command and parameters, please execute helm --help and helm install --help.\nCreate OUD instances You can create OUD instances using one of the following methods:\n Using a YAML file Using --set argument Note: While it is possible to install sample data during the OUD deployment, it is not possible to load your own data via an ldif file. In order to load data in OUD, create the OUD deployment and then use ldapmodify once the ingress has been deployed.
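As a purely illustrative sketch (the host, port and LDIF file name below are placeholders rather than values defined in this guide, and the bind DN shown is the chart default rootUserDN), an ldapmodify invocation to load entries once the LDAP interface is reachable might look like the following:\n$ ldapmodify -h \u0026lt;ldap_host\u0026gt; -p \u0026lt;ldap_port\u0026gt; -D \u0026#34;cn=Directory Manager\u0026#34; -w \u0026lt;password\u0026gt; -a -f \u0026lt;your_data\u0026gt;.ldif Here -a treats LDIF records without a changetype as add operations.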
See Using LDAP utilities.\nUsing a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create an oud-ds-rs-values-override.yaml as follows:\nimage: repository: \u0026lt;image_location\u0026gt; tag: \u0026lt;image_tag\u0026gt; pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oudConfig: rootUserPassword: \u0026lt;password\u0026gt; sampleData: \u0026#34;200\u0026#34; persistence: type: filesystem filesystem: hostPath: path: \u0026lt;persistent_volume\u0026gt;/oud_user_projects cronJob: kubectlImage: repository: bitnami/kubectl tag: \u0026lt;version\u0026gt; pullPolicy: IfNotPresent helmImage: repository: alpine/helm tag: \u0026lt;version\u0026gt; pullPolicy: IfNotPresent cronPersistence: enabled: true type: filesystem filesystem: hostPath: path: \u0026lt;$WORKDIR\u0026gt;/kubernetes/helm imagePullSecrets: - name: dockercred For example:\nimage: repository: container-registry.oracle.com/middleware/oud_cpu tag: 12.2.1.4-jdk8-ol7-220119.2051 pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oudConfig: rootUserPassword: \u0026lt;password\u0026gt; sampleData: \u0026#34;200\u0026#34; persistence: type: filesystem filesystem: hostPath: path: /scratch/shared/oud_user_projects cronJob: kubectlImage: repository: bitnami/kubectl tag: 1.21.0 pullPolicy: IfNotPresent helmImage: repository: alpine/helm tag: 3.2.0 pullPolicy: IfNotPresent cronPersistence: enabled: true type: filesystem filesystem: hostPath: path: /scratch/shared/OUDContainer/fmw-kubernetes/OracleUnifiedDirectory/kubernetes/helm imagePullSecrets: - name: dockercred The following caveats exist:\n Replace \u0026lt;password\u0026gt; with the relevant password.\n sampleData: \u0026quot;200\u0026quot; will load 200 sample users into the default baseDN dc=example,dc=com. If you do not want sample data, remove this entry.\n The \u0026lt;version\u0026gt; in kubectlImage tag: should be set to the same version as your Kubernetes version (kubectl version). For example if your Kubernetes version is 1.21.6 set to 1.21.0.\n The \u0026lt;version\u0026gt; in helmimage tag: should be set to the same version as your Helm version (helm version). 
For example, if your helm version is 3.2.4 set to 3.2.0.\n The cronPersistence path must point to the helm charts directory on the persistent volume.\n If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:\nimagePullSecrets: - name: orclcred If using NFS for your persistent volume then change the persistence and cronPersistence sections as follows:\npersistence: type: networkstorage networkstorage: nfs: path: \u0026lt;persistent_volume\u0026gt;/oud_user_projects server: \u0026lt;NFS IP address\u0026gt; cronPersistence: enabled: true type: networkstorage networkstorage: nfs: path: \u0026lt;$WORKDIR\u0026gt;/kubernetes/helm server: \u0026lt;NFS_IP_Address\u0026gt; Run the following command to deploy OUD:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --values oud-ds-rs-values-override.yaml \\ \u0026lt;release_name\u0026gt; oud-ds-rs For example:\n$ helm install --namespace oudns \\ --values oud-ds-rs-values-override.yaml \\ oud-ds-rs oud-ds-rs Check the OUD deployment as per Verify the OUD deployment and Verify the OUD replication.\n Using --set argument Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Run the following command to create OUD instances:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --set oudConfig.rootUserPassword=\u0026lt;password\u0026gt;,persistence.filesystem.hostPath.path=\u0026lt;persistent_volume\u0026gt;/oud_user_projects,image.repository=\u0026lt;image_location\u0026gt;,image.tag=\u0026lt;image_tag\u0026gt; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ --set sampleData=\u0026#34;200\u0026#34; \\ --set cronJob.kubectlImage.repository=bitnami/kubectl,cronJob.kubectlImage.tag=\u0026lt;version\u0026gt; \\ --set cronJob.helmImage.repository=alpine/helm,cronJob.helmImage.tag=\u0026lt;version\u0026gt; \\ --set cronJob.cronPersistence.filesystem.hostPath.path=\u0026lt;$WORKDIR\u0026gt;/kubernetes/helm \\ --set cronJob.imagePullSecrets[0].name=\u0026#34;dockercred\u0026#34; \\ \u0026lt;release_name\u0026gt; oud-ds-rs For example:\n$ helm install --namespace oudns \\ --set oudConfig.rootUserPassword=\u0026lt;password\u0026gt;,persistence.filesystem.hostPath.path=/scratch/shared/oud_user_projects,image.repository=container-registry.oracle.com/middleware/oud_cpu,image.tag=12.2.1.4-jdk8-ol7-220119.2051 \\ --set sampleData=\u0026#34;200\u0026#34; \\ --set cronJob.kubectlImage.repository=bitnami/kubectl,cronJob.kubectlImage.tag=1.21.0 \\ --set cronJob.helmImage.repository=alpine/helm,cronJob.helmImage.tag=3.2.0 \\ --set cronJob.cronPersistence.filesystem.hostPath.path=/scratch/shared/OUDContainer/fmw-kubernetes/OracleUnifiedDirectory/kubernetes/helm/ \\ --set cronJob.imagePullSecrets[0].name=\u0026#34;dockercred\u0026#34; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ oud-ds-rs oud-ds-rs The following caveats exist:\n Replace \u0026lt;password\u0026gt; with the relevant password. sampleData: \u0026quot;200\u0026quot; will load 200 sample users into the default baseDN dc=example,dc=com. If you do not want sample data, remove this entry. The \u0026lt;version\u0026gt; in kubectlImage tag: should be set to the same version as your Kubernetes version (kubectl version). For example, if your Kubernetes version is 1.21.6 set to 1.21.0. The \u0026lt;version\u0026gt; in helmImage tag: should be set to the same version as your Helm version (helm version).
For example, if your helm version is 3.2.4 set to 3.2.0. The cronPersistence path must point to the helm charts directory on the persistent volume. If using NFS for your persistent volume then use persistence.networkstorage.nfs.path=\u0026lt;persistent_volume\u0026gt;/oud_user_projects,persistence.networkstorage.nfs.server=\u0026lt;NFS IP address\u0026gt;. If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following: --set imagePullSecrets[0].name=\u0026quot;orclcred\u0026quot;. Check the OUD deployment as per Verify the OUD deployment and Verify the OUD replication.\n Helm command output In all the examples above, the following output is shown following a successful execution of the helm install command.\nNAME: oud-ds-rs LAST DEPLOYED: Wed Mar 16 12:02:40 2022 NAMESPACE: oudns STATUS: deployed REVISION: 4 NOTES: # # Copyright (c) 2020, Oracle and/or its affiliates. # # Licensed under the Universal Permissive License v 1.0 as shown at # https://oss.oracle.com/licenses/upl # # Since \u0026#34;nginx\u0026#34; has been chosen, follow the steps below to configure nginx ingress controller. Add Repo reference to helm for retriving/installing Chart for nginx-ingress implementation. command-# helm repo add ingress-nginx https://kubernetes.github.io/ingress-nginx Command helm install to install nginx-ingress related objects like pod, service, deployment, etc. # helm install --namespace \u0026lt;namespace for ingress\u0026gt; --values nginx-ingress-values-override.yaml lbr-nginx ingress-nginx/ingress-nginx For details of content of nginx-ingress-values-override.yaml refer README.md file of this chart. Run these commands to check port mapping and services: # kubectl --namespace \u0026lt;namespace for ingress\u0026gt; get services -o wide -w lbr-nginx-ingress-controller # kubectl describe --namespace \u0026lt;namespace for oud-ds-rs chart\u0026gt; ingress.extensions/oud-ds-rs-http-ingress-nginx # kubectl describe --namespace \u0026lt;namespace for oud-ds-rs chart\u0026gt; ingress.extensions/oud-ds-rs-admin-ingress-nginx Accessible interfaces through ingress: (External IP Address for LoadBalancer NGINX Controller can be determined through details associated with lbr-nginx-ingress-controller) 1. OUD Admin REST: Port: http/https 2. OUD Data REST: Port: http/https 3. OUD Data SCIM: Port: http/https 4. OUD LDAP/LDAPS: Port: ldap/ldaps 5. OUD Admin LDAPS: Port: ldaps Please refer to README.md from Helm Chart to find more details about accessing interfaces and configuration parameters.
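If you need to display this release information and its NOTES text again later, the standard helm status command can be used (the release name and namespace below are the ones used in the examples above):\n$ helm status --namespace \u0026lt;namespace\u0026gt; \u0026lt;release_name\u0026gt; For example:\n$ helm status --namespace oudns oud-ds-rs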
Verify the OUD deployment Run the following command to verify the OUD deployment:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pod,service,secret,pv,pvc,ingress -o wide For example:\n$ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 17m 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 17m 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 1/1 Running 0 17m 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oud-ds-rs-0 ClusterIP 10.99.232.83 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 8m44s kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-1 ClusterIP 10.100.186.42 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-2 ClusterIP 10.104.55.53 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 service/oud-ds-rs-http-0 ClusterIP 10.102.116.145 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-http-1 ClusterIP 10.111.103.84 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 8m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-http-2 ClusterIP 10.105.53.24 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 service/oud-ds-rs-lbr-admin ClusterIP 10.98.39.206 \u0026lt;none\u0026gt; 1888/TCP,1444/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-http ClusterIP 10.110.77.132 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-ldap ClusterIP 10.111.55.122 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-ldap-0 ClusterIP 10.108.155.81 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 8m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-ldap-1 ClusterIP 10.104.88.44 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-ldap-2 ClusterIP 10.105.253.120 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 NAME TYPE DATA AGE secret/default-token-tbjr5 kubernetes.io/service-account-token 3 25d secret/orclcred kubernetes.io/dockerconfigjson 1 3d secret/oud-ds-rs-creds opaque 8 8m48s secret/oud-ds-rs-token-cct26 kubernetes.io/service-account-token 3 8m50s secret/sh.helm.release.v1.oud-ds-rs.v1 helm.sh/release.v1 1 8m51s NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE persistentvolume/oud-ds-rs-pv 20Gi RWX Retain Bound oudns/oud-ds-rs-pvc manual 8m47s NAME STATUS VOLUME CAPACITY ACCESS 
MODES STORAGECLASS AGE persistentvolumeclaim/oud-ds-rs-pvc Bound oud-ds-rs-pv 20Gi RWX manual 8m48s NAME HOSTS ADDRESS PORTS AGE ingress.extensions/oud-ds-rs-admin-ingress-nginx oud-ds-rs-admin-0,oud-ds-rs-admin-1,oud-ds-rs-admin-2 + 2 more... 10.229.141.78 80 8m45s ingress.extensions/oud-ds-rs-http-ingress-nginx oud-ds-rs-http-0,oud-ds-rs-http-1,oud-ds-rs-http-2 + 3 more... 10.229.141.78 80 8m45s Note: It will take several minutes before all the services listed above show. While an oud-ds-rs pod shows 0/1 in the READY column, the pod is started but the OUD server associated with it is still starting. While the pod is starting you can check the startup status in the pod logs by running the following command:\n$ kubectl logs oud-ds-rs-0 -n oudns $ kubectl logs oud-ds-rs-1 -n oudns $ kubectl logs oud-ds-rs-2 -n oudns Note: If the OUD deployment fails, additionally refer to Troubleshooting for instructions on how to describe the failing pod(s). Once the problem is identified, follow Undeploy an OUD deployment to clean down the deployment before deploying again.\nKubernetes Objects Kubernetes objects created by the Helm chart are detailed in the table below:\n Type Name Example Name Purpose Service Account \u0026lt;deployment/release name\u0026gt; oud-ds-rs Kubernetes Service Account for the Helm Chart deployment Secret \u0026lt;deployment/release name\u0026gt;-creds oud-ds-rs-creds Secret object for Oracle Unified Directory related critical values like passwords Persistent Volume \u0026lt;deployment/release name\u0026gt;-pv oud-ds-rs-pv Persistent Volume for user_projects mount. Persistent Volume Claim \u0026lt;deployment/release name\u0026gt;-pvc oud-ds-rs-pvc Persistent Volume Claim for user_projects mount. Persistent Volume \u0026lt;deployment/release name\u0026gt;-pv-config oud-ds-rs-pv-config Persistent Volume for mounting volume in containers for configuration files like ldif, schema, jks, java.security, etc. Persistent Volume Claim \u0026lt;deployment/release name\u0026gt;-pvc-config oud-ds-rs-pvc-config Persistent Volume Claim for mounting volume in containers for configuration files like ldif, schema, jks, java.security, etc.
Pod \u0026lt;deployment/release name\u0026gt;-0 oud-ds-rs-0 Pod/Container for base Oracle Unified Directory Instance which would be populated first with base configuration (like number of sample entries) Pod \u0026lt;deployment/release name\u0026gt;-N oud-ds-rs-1, oud-ds-rs-2, \u0026hellip; Pod(s)/Container(s) for Oracle Unified Directory Instances - each would have replication enabled against base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-0 Service \u0026lt;deployment/release name\u0026gt;-0 oud-ds-rs-0 Service for LDAPS Admin, REST Admin and Replication interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-0 Service \u0026lt;deployment/release name\u0026gt;-http-0 oud-ds-rs-http-0 Service for HTTP and HTTPS interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-0 Service \u0026lt;deployment/release name\u0026gt;-ldap-0 oud-ds-rs-ldap-0 Service for LDAP and LDAPS interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-0 Service \u0026lt;deployment/release name\u0026gt;-N oud-ds-rs-1, oud-ds-rs-2, \u0026hellip; Service(s) for LDAPS Admin, REST Admin and Replication interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-N Service \u0026lt;deployment/release name\u0026gt;-http-N oud-ds-rs-http-1, oud-ds-rs-http-2, \u0026hellip; Service(s) for HTTP and HTTPS interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-N Service \u0026lt;deployment/release name\u0026gt;-ldap-N oud-ds-rs-ldap-1, oud-ds-rs-ldap-2, \u0026hellip; Service(s) for LDAP and LDAPS interfaces from base Oracle Unified Directory instance \u0026lt;deployment/release name\u0026gt;-N Service \u0026lt;deployment/release name\u0026gt;-lbr-admin oud-ds-rs-lbr-admin Service for LDAPS Admin, REST Admin and Replication interfaces from all Oracle Unified Directory instances Service \u0026lt;deployment/release name\u0026gt;-lbr-http oud-ds-rs-lbr-http Service for HTTP and HTTPS interfaces from all Oracle Unified Directory instances Service \u0026lt;deployment/release name\u0026gt;-lbr-ldap oud-ds-rs-lbr-ldap Service for LDAP and LDAPS interfaces from all Oracle Unified Directory instances Ingress \u0026lt;deployment/release name\u0026gt;-admin-ingress-nginx oud-ds-rs-admin-ingress-nginx Ingress Rules for HTTP Admin interfaces. Ingress \u0026lt;deployment/release name\u0026gt;-http-ingress-nginx oud-ds-rs-http-ingress-nginx Ingress Rules for HTTP (Data/REST) interfaces. In the table above the \u0026lsquo;Example Name\u0026rsquo; for each Object is based on the value \u0026lsquo;oud-ds-rs\u0026rsquo; as deployment/release name for the Helm chart installation. Verify the OUD replication Once all the PODs created are visible as READY (i.e. 1/1), you can verify your replication across multiple Oracle Unified Directory instances.\n To verify the replication group, connect to the container and issue an OUD administration command to show the details. 
The name of the container can be found by issuing the following:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; -o jsonpath=\u0026#39;{.items[*].spec.containers[*].name}\u0026#39; For example:\n$ kubectl get pods -n oudns -o jsonpath=\u0026#39;{.items[*].spec.containers[*].name}\u0026#39; The output will look similar to the following:\noud-ds-rs oud-ds-rs oud-ds-rs Once you have the container name you can verify the replication status in the following ways:\n Run dsreplication inside the pod Using kubectl commands Run dsreplication inside the pod Run the following command to create a bash shell in the pod:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; exec -it -c \u0026lt;containername\u0026gt; \u0026lt;podname\u0026gt; -- bash For example:\n$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- bash This will take you into the pod:\n[oracle@oud-ds-rs-0 oracle]$ From the prompt, use the dsreplication command to check the status of your replication group:\n$ cd /u01/oracle/user_projects/oud-ds-rs-0/OUD/bin $ ./dsreplication status --trustAll \\ --hostname oud-ds-rs-0 --port 1444 --adminUID admin \\ --dataToDisplay compat-view --dataToDisplay rs-connections The output will look similar to the following. Enter credentials where prompted:\n\u0026gt;\u0026gt;\u0026gt;\u0026gt; Specify Oracle Unified Directory LDAP connection parameters Password for user \u0026#39;admin\u0026#39;: Establishing connections and reading configuration ..... Done. dc=example,dc=com - Replication Enabled ======================================= Server : Entries : M.C. [1] : A.O.M.C. [2] : Port [3] : Encryption [4] : Trust [5] : U.C. [6] : Status [7] : ChangeLog [8] : Group ID [9] : Connected To [10] ---------------------:---------:----------:--------------:----------:----------------:-----------:----------:------------:---------------:--------------:------------------------------- oud-ds-rs-0:1444 : 1 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-0:1898 : : : : : : : : : : : (GID=1) oud-ds-rs-1:1444 : 1 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-1:1898 : : : : : : : : : : : (GID=1) oud-ds-rs-2:1444 : 1 : 0 : 0 : 1898 : Disabled : Trusted : -- : Normal : Enabled : 1 : oud-ds-rs-2:1898 : : : : : : : : : : : (GID=1) Replication Server [11] : RS #1 : RS #2 : RS #3 -------------------------------:-------:-------:------ oud-ds-rs-0:1898 : -- : Yes : Yes (#1) : : : oud-ds-rs-1:1898 : Yes : -- : Yes (#2) : : : oud-ds-rs-2:1898 : Yes : Yes : -- (#3) : : : [1] The number of changes that are still missing on this element (and that have been applied to at least one other server). [2] Age of oldest missing change: the age (in seconds) of the oldest change that has not yet arrived on this element. [3] The replication port used to communicate between the servers whose contents are being replicated. [4] Whether the replication communication initiated by this element is encrypted or not. [5] Whether the directory server is trusted or not. Updates coming from an untrusted server are discarded and not propagated. [6] The number of untrusted changes. These are changes generated on this server while it is untrusted. Those changes are not propagated to the rest of the topology but are effective on the untrusted server. [7] The status of the replication on this element. [8] Whether the external change log is enabled for the base DN on this server or not. [9] The ID of the replication group to which the server belongs.
[10] The replication server this server is connected to with its group ID between brackets. [11] This table represents the connections between the replication servers. The headers of the columns use a number as identifier for each replication server. See the values of the first column to identify the corresponding replication server for each number. Type exit to exit the pod.\n Using kubectl commands The dsreplication status command can be invoked using the following kubectl command:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; exec -it -c \u0026lt;containername\u0026gt; \u0026lt;podname\u0026gt; -- \\ /u01/oracle/user_projects/\u0026lt;OUD Instance/Pod Name\u0026gt;/OUD/bin/dsreplication status \\ --trustAll --hostname \u0026lt;OUD Instance/Pod Name\u0026gt; --port 1444 --adminUID admin \\ --dataToDisplay compat-view --dataToDisplay rs-connections For example:\n$ kubectl --namespace oudns exec -it -c oud-ds-rs oud-ds-rs-0 -- \\ /u01/oracle/user_projects/oud-ds-rs-0/OUD/bin/dsreplication status \\ --trustAll --hostname oud-ds-rs-0 --port 1444 --adminUID admin \\ --dataToDisplay compat-view --dataToDisplay rs-connections The output will be the same as per Run dsreplication inside the pod.\n Verify the cronjob Run the following command to make sure the cronjob is created:\n$ kubectl get cronjob -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get cronjob -n oudns The output will look similar to the following:\nNAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE oud-pod-cron-job */30 * * * * False 0 \u0026lt;none\u0026gt; 15s Run the following command to make sure the job(s) are created:\n$ kubectl get job -n \u0026lt;namespace\u0026gt; -o wide For example:\n$ kubectl get job -n oudns -o wide The output will look similar to the following:\nNAME COMPLETIONS DURATION AGE CONTAINERS IMAGES SELECTOR oud-pod-cron-job-27467340 1/1 17s 6m48s cron-kubectl,cron-helm bitnami/kubectl:1.21.0,alpine/helm:3.2.0 controller-uid=e8e7dfe2-d197-4b84-a5a4-d203d54caaac Note: The job(s) will only be displayed after the time schedule originally set has elapsed (the default is 30 minutes).\n Disabling the cronjob If you need to disable the job, for example if maintenance needs to be performed on the node, you can disable the job as follows:\n Run the following command to edit the cronjob:\n$ kubectl edit cronjob oud-pod-cron-job -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl edit cronjob oud-pod-cron-job -n oudns Note: This opens an edit session for the cronjob where parameters can be changed using standard vi commands.\n In the edit session search for suspend and change the value from false to true:\n... - name: oud-ds-rs-job-pv persistentVolumeClaim: claimName: oud-ds-rs-job-pvc schedule: '*/30 * * * *' successfulJobsHistoryLimit: 3 suspend: true ...
Save the file and exit (wq!).\n Run the following to make sure the cronjob is suspended:\n$ kubectl get cronjob -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get cronjob -n oudns The output will look similar to the following:\nNAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE oud-pod-cron-job */30 * * * * True 0 11m 33m To enable the cronjob again, repeat the above steps and set suspend to false.\n Ingress Configuration With an OUD instance now deployed you are now ready to configure an ingress controller to direct traffic to OUD as per Configure an ingress for an OUD.\nUndeploy an OUD deployment Delete the OUD deployment Find the deployment release name:\n$ helm --namespace \u0026lt;namespace\u0026gt; list For example:\n$ helm --namespace oudns list The output will look similar to the following:\nNAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION oud-ds-rs oudns 1 2021-03-16 12:02:40.616927678 -0700 PDT deployed oud-ds-rs-12.2.1.4.0 12.2.1.4.0 Delete the deployment using the following command:\n$ helm uninstall --namespace \u0026lt;namespace\u0026gt; \u0026lt;release\u0026gt; For example:\n$ helm uninstall --namespace oudns oud-ds-rs release \u0026#34;oud-ds-rs\u0026#34; uninstalled Delete the persistent volume contents Delete the contents of the oud_user_projects directory in the persistent volume:\n$ cd \u0026lt;persistent_volume\u0026gt;/oud_user_projects $ rm -rf * For example:\n$ cd /scratch/shared/oud_user_projects $ rm -rf * Appendix: Configuration Parameters The following table lists the configurable parameters of the oud-ds-rs chart and their default values.\n Parameter Description Default Value replicaCount Number of DS+RS instances/pods/services to be created with replication enabled against a base Oracle Unified Directory instance/pod. 3 restartPolicyName restartPolicy to be configured for each POD containing Oracle Unified Directory instance OnFailure image.repository Oracle Unified Directory Image Registry/Repository and name. Based on this, image parameter would be configured for Oracle Unified Directory pods/containers oracle/oud image.tag Oracle Unified Directory Image Tag. Based on this, image parameter would be configured for Oracle Unified Directory pods/containers 12.2.1.4.0 image.pullPolicy policy to pull the image IfnotPresent imagePullSecrets.name name of Secret resource containing private registry credentials regcred nameOverride override the fullname with this name fullnameOverride Overrides the fullname with the provided string serviceAccount.create Specifies whether a service account should be created true serviceAccount.name If not set and create is true, a name is generated using the fullname template oud-ds-rs-\u0026lt; fullname \u0026gt;-token-\u0026lt; randomalphanum \u0026gt; podSecurityContext Security context policies to add to the controller pod securityContext Security context policies to add by default service.type type of controller service to create ClusterIP nodeSelector node labels for pod assignment tolerations node taints to tolerate affinity node/pod affinities ingress.enabled true ingress.type Supported value: nginx nginx ingress.nginx.http.host Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as \u0026lt; fullname \u0026gt;-http.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-0.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-1.\u0026lt; domain \u0026gt;, etc. 
ingress.nginx.http.domain Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as \u0026lt; host \u0026gt;.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-0.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.http.backendPort http ingress.nginx.http.nginxAnnotations { kubernetes.io/ingress.class: \u0026ldquo;nginx\u0026quot;} ingress.nginx.admin.host Hostname to be used with Ingress Rules. If not set, hostname would be configured according to fullname. Hosts would be configured as \u0026lt; fullname \u0026gt;-admin.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-admin-0.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-admin-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.admin.domain Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as \u0026lt; host \u0026gt;.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-0.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-1.\u0026lt; domain \u0026gt;, etc. ingress.nginx.admin.nginxAnnotations { kubernetes.io/ingress.class: \u0026ldquo;nginx\u0026rdquo; nginx.ingress.kubernetes.io/backend-protocol: \u0026ldquo;https\u0026quot;} ingress.ingress.tlsSecret Secret name to use an already created TLS Secret. If such secret is not provided, one would be created with name \u0026lt; fullname \u0026gt;-tls-cert. If the TLS Secret is in different namespace, name can be mentioned as \u0026lt; namespace \u0026gt;/\u0026lt; tlsSecretName \u0026gt; ingress.certCN Subject\u0026rsquo;s common name (cn) for SelfSigned Cert. \u0026lt; fullname \u0026gt; ingress.certValidityDays Validity of Self-Signed Cert in days 365 secret.enabled If enabled it will use the secret created with base64 encoding. if value is false, secret would not be used and input values (through \u0026ndash;set, \u0026ndash;values, etc.) would be used while creation of pods. true secret.name secret name to use an already created Secret oud-ds-rs-\u0026lt; fullname \u0026gt;-creds secret.type Specifies the type of the secret Opaque persistence.enabled If enabled, it will use the persistent volume. if value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. true persistence.pvname pvname to use an already created Persistent Volume , If blank will use the default name oud-ds-rs-\u0026lt; fullname \u0026gt;-pv persistence.pvcname pvcname to use an already created Persistent Volume Claim , If blank will use default name oud-ds-rs-\u0026lt; fullname \u0026gt;-pvc persistence.type supported values: either filesystem or networkstorage or custom filesystem persistence.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. /scratch/shared/oud_user_projects persistence.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oud_user_projects persistence.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 persistence.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object persistence.accessMode Specifies the access mode of the location provided ReadWriteMany persistence.size Specifies the size of the storage 10Gi persistence.storageClass Specifies the storageclass of the persistence volume. empty persistence.annotations specifies any annotations that will be used { } configVolume.enabled If enabled, it will use the persistent volume. 
If value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. true configVolume.mountPath If enabled, it will use the persistent volume. If value is false, PV and PVC would not be used and there would not be any mount point available for config false configVolume.pvname pvname to use an already created Persistent Volume , If blank will use the default name oud-ds-rs-\u0026lt; fullname \u0026gt;-pv-config configVolume.pvcname pvcname to use an already created Persistent Volume Claim , If blank will use default name oud-ds-rs-\u0026lt; fullname \u0026gt;-pvc-config configVolume.type supported values: either filesystem or networkstorage or custom filesystem configVolume.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. /scratch/shared/oud_user_projects configVolume.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oud_config configVolume.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 configVolume.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object configVolume.accessMode Specifies the access mode of the location provided ReadWriteMany configVolume.size Specifies the size of the storage 10Gi configVolume.storageClass Specifies the storageclass of the persistence volume. empty configVolume.annotations specifies any annotations that will be used { } oudPorts.adminldaps Port on which Oracle Unified Directory Instance in the container should listen for Administration Communication over LDAPS Protocol 1444 oudPorts.adminhttps Port on which Oracle Unified Directory Instance in the container should listen for Administration Communication over HTTPS Protocol. 1888 oudPorts.ldap Port on which Oracle Unified Directory Instance in the container should listen for LDAP Communication. 1389 oudPorts.ldaps Port on which Oracle Unified Directory Instance in the container should listen for LDAPS Communication. 1636 oudPorts.http Port on which Oracle Unified Directory Instance in the container should listen for HTTP Communication. 1080 oudPorts.https Port on which Oracle Unified Directory Instance in the container should listen for HTTPS Communication. 1081 oudPorts.replication Port value to be used while setting up replication server. 1898 oudConfig.baseDN BaseDN for Oracle Unified Directory Instances dc=example,dc=com oudConfig.rootUserDN Root User DN for Oracle Unified Directory Instances cn=Directory Manager oudConfig.rootUserPassword Password for Root User DN RandomAlphanum oudConfig.sampleData To specify that the database should be populated with the specified number of sample entries. 0 oudConfig.sleepBeforeConfig Based on the value for this parameter, initialization/configuration of each Oracle Unified Directory replica would be delayed. 120 oudConfig.adminUID AdminUID to be configured with each replicated Oracle Unified Directory instance admin oudConfig.adminPassword Password for AdminUID. If the value is not passed, value of rootUserPassword would be used as password for AdminUID. rootUserPassword baseOUD.envVarsConfigMap Reference to ConfigMap which can contain additional environment variables to be passed on to POD for Base Oracle Unified Directory Instance. Following are the environment variables which would not be honored from the ConfigMap. 
instanceType, sleepBeforeConfig, OUD_INSTANCE_NAME, hostname, baseDN, rootUserDN, rootUserPassword, adminConnectorPort, httpAdminConnectorPort, ldapPort, ldapsPort, httpPort, httpsPort, replicationPort, sampleData. - baseOUD.envVars Environment variables in Yaml Map format. This is helpful when its requried to pass environment variables through \u0026ndash;values file. List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap. - replOUD.envVarsConfigMap Reference to ConfigMap which can contain additional environment variables to be passed on to PODs for Replicated Oracle Unified Directory Instances. Following are the environment variables which would not be honored from the ConfigMap. instanceType, sleepBeforeConfig, OUD_INSTANCE_NAME, hostname, baseDN, rootUserDN, rootUserPassword, adminConnectorPort, httpAdminConnectorPort, ldapPort, ldapsPort, httpPort, httpsPort, replicationPort, sampleData, sourceHost, sourceServerPorts, sourceAdminConnectorPort, sourceReplicationPort, dsreplication_1, dsreplication_2, dsreplication_3, dsreplication_4, post_dsreplication_dsconfig_1, post_dsreplication_dsconfig_2 - replOUD.envVars Environment variables in Yaml Map format. This is helpful when its required to pass environment variables through \u0026ndash;values file. List of env variables which would not be honored from envVars map is same as list of env var names mentioned for envVarsConfigMap. - replOUD.groupId Group ID to be used/configured with each Oracle Unified Directory instance in replicated topology. 1 elk.elasticsearch.enabled If enabled it will create the elastic search statefulset deployment false elk.elasticsearch.image.repository Elastic Search Image name/Registry/Repository . Based on this elastic search instances will be created docker.elastic.co/elasticsearch/elasticsearch elk.elasticsearch.image.tag Elastic Search Image tag .Based on this, image parameter would be configured for Elastic Search pods/instances 6.4.3 elk.elasticsearch.image.pullPolicy policy to pull the image IfnotPresent elk.elasticsearch.esreplicas Number of Elastic search Instances will be created 3 elk.elasticsearch.minimumMasterNodes The value for discovery.zen.minimum_master_nodes. Should be set to (esreplicas / 2) + 1. 2 elk.elasticsearch.esJAVAOpts Java options for Elasticsearch. This is where you should configure the jvm heap size -Xms512m -Xmx512m elk.elasticsearch.sysctlVmMaxMapCount Sets the sysctl vm.max_map_count needed for Elasticsearch 262144 elk.elasticsearch.resources.requests.cpu cpu resources requested for the elastic search 100m elk.elasticsearch.resources.limits.cpu total cpu limits that are configures for the elastic search 1000m elk.elasticsearch.esService.type Type of Service to be created for elastic search ClusterIP elk.elasticsearch.esService.lbrtype Type of load balancer Service to be created for elastic search ClusterIP elk.kibana.enabled If enabled it will create a kibana deployment false elk.kibana.image.repository Kibana Image Registry/Repository and name. Based on this Kibana instance will be created docker.elastic.co/kibana/kibana elk.kibana.image.tag Kibana Image tag. Based on this, Image parameter would be configured. 
6.4.3 elk.kibana.image.pullPolicy policy to pull the image IfnotPresent elk.kibana.kibanaReplicas Number of Kibana instances will be created 1 elk.kibana.service.tye Type of service to be created NodePort elk.kibana.service.targetPort Port on which the kibana will be accessed 5601 elk.kibana.service.nodePort nodePort is the port on which kibana service will be accessed from outside 31119 elk.logstash.enabled If enabled it will create a logstash deployment false elk.logstash.image.repository logstash Image Registry/Repository and name. Based on this logstash instance will be created logstash elk.logstash.image.tag logstash Image tag. Based on this, Image parameter would be configured. 6.6.0 elk.logstash.image.pullPolicy policy to pull the image IfnotPresent elk.logstash.containerPort Port on which the logstash container will be running 5044 elk.logstash.service.tye Type of service to be created NodePort elk.logstash.service.targetPort Port on which the logstash will be accessed 9600 elk.logstash.service.nodePort nodePort is the port on which logstash service will be accessed from outside 32222 elk.logstash.logstashConfigMap Provide the configmap name which is already created with the logstash conf. if empty default logstash configmap will be created and used elk.elkPorts.rest Port for REST 9200 elk.elkPorts.internode port used for communication between the nodes 9300 elk.busybox.image busy box image name. Used for initcontianers busybox elk.elkVolume.enabled If enabled, it will use the persistent volume. if value is false, PV and pods would be using the default emptyDir mount volume. true elk.elkVolume.pvname pvname to use an already created Persistent Volume , If blank will use the default name oud-ds-rs-\u0026lt; fullname \u0026gt;-espv elk.elkVolume.type supported values: either filesystem or networkstorage or custom filesystem elk.elkVolume.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. /scratch/shared/oud_elk/data elk.elkVolume.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oud_elk/data elk.elkVolume.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 elk.elkVolume.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object elk.elkVolume.accessMode Specifies the access mode of the location provided ReadWriteMany elk.elkVolume.size Specifies the size of the storage 20Gi elk.elkVolume.storageClass Specifies the storageclass of the persistence volume. 
elk elk.elkVolume.annotations specifies any annotations that will be used { } " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oudsm/create-oudsm-instances/", + "title": "Create Oracle Unified Directory Services Manager Instances", + "tags": [], + "description": "This document provides details of the oudsm Helm chart.", + "content": " Introduction Create a Kubernetes namespace Create a Kubernetes secret for the container registry Create a persistent volume directory The oudsm Helm chart Create OUDSM instances Helm command output Verify the OUDSM deployment Undeploy an OUDSM deployment Appendix: Configuration parameters Introduction This chapter demonstrates how to deploy Oracle Unified Directory Services Manager (OUDSM) 12c instance(s) using the Helm package manager for Kubernetes.\nBased on the configuration, this chart deploys the following objects in the specified namespace of a Kubernetes cluster.\n Service Account Secret Persistent Volume and Persistent Volume Claim Pod(s)/Container(s) for Oracle Unified Directory Services Manager Instances Services for interfaces exposed through Oracle Unified Directory Services Manager Instances Ingress configuration Create a Kubernetes namespace Create a Kubernetes namespace for the OUDSM deployment by running the following command:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace oudsmns The output will look similar to the following:\nnamespace/oudsmns created Create a Kubernetes secret for the container registry Create a Kubernetes secret that stores the credentials for the container registry where the OUDSM image is stored. This step must be followed if using Oracle Container Registry or your own private container registry. If you are not using a container registry and have loaded the images on each of the master and worker nodes, you can skip this step.\n Run the following command to create the secret:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=\u0026lt;CONTAINER_REGISTRY\u0026gt; \\ --docker-username=\u0026#34;\u0026lt;USER_NAME\u0026gt;\u0026#34; \\ --docker-password=\u0026lt;PASSWORD\u0026gt; --docker-email=\u0026lt;EMAIL_ID\u0026gt; \\ --namespace=\u0026lt;domain_namespace\u0026gt; For example, if using Oracle Container Registry:\nkubectl create secret docker-registry \u0026#34;orclcred\u0026#34; --docker-server=container-registry.oracle.com \\ --docker-username=\u0026#34;user@example.com\u0026#34; \\ --docker-password=password --docker-email=user@example.com \\ --namespace=oudsmns Replace \u0026lt;USER_NAME\u0026gt; and \u0026lt;PASSWORD\u0026gt; with the credentials for the registry with the following caveats:\n If using Oracle Container Registry to pull the OUDSM container image, this is the username and password used to login to Oracle Container Registry. Before you can use this image you must login to Oracle Container Registry, navigate to Middleware \u0026gt; oudsm_cpu and accept the license agreement.\n If using your own container registry to store the OUDSM container image, this is the username and password (or token) for your container registry.\n The output will look similar to the following:\nsecret/orclcred created Create a persistent volume directory As referenced in Prerequisites the nodes in the Kubernetes cluster must have access to a persistent volume such as a Network File System (NFS) mount or a shared file system.\nMake sure the persistent volume path has full access permissions, and that the folder is empty. 
In this example /scratch/shared/ is a shared directory accessible from all nodes.\n On the master node run the following command to create an oudsm_user_projects directory:\n$ cd \u0026lt;persistent_volume\u0026gt; $ mkdir oudsm_user_projects $ chmod 777 oudsm_user_projects For example:\n$ cd /scratch/shared $ mkdir oudsm_user_projects $ chmod 777 oudsm_user_projects On the master node run the following to ensure it is possible to read and write to the persistent volume:\n$ cd \u0026lt;persistent_volume\u0026gt;/oudsm_user_projects $ touch filemaster.txt $ ls filemaster.txt For example:\n$ cd /scratch/shared/oudsm_user_projects $ touch filemaster.txt $ ls filemaster.txt On the first worker node run the following to ensure it is possible to read and write to the persistent volume:\n$ cd /scratch/shared/oudsm_user_projects $ ls filemaster.txt $ touch fileworker1.txt $ ls fileworker1.txt Repeat the above for any other worker nodes, e.g. fileworker2.txt, etc. Once proven that it\u0026rsquo;s possible to read and write from each node to the persistent volume, delete the files created.\n The oudsm Helm chart The oudsm Helm chart allows you to create or deploy Oracle Unified Directory Services Manager instances along with Kubernetes objects in a specified namespace.\nThe deployment can be initiated by running the following Helm command with reference to the oudsm Helm chart, along with configuration parameters according to your environment.\n$ cd $WORKDIR/kubernetes/helm $ helm install --namespace \u0026lt;namespace\u0026gt; \\ \u0026lt;Configuration Parameters\u0026gt; \\ \u0026lt;deployment/release name\u0026gt; \\ \u0026lt;Helm Chart Path/Name\u0026gt; Configuration Parameters (override values in chart) can be passed on with --set arguments on the command line and/or with -f / --values arguments when referring to files.\nNote: The examples in Create OUDSM instances below provide values which allow the user to override the default values provided by the Helm chart.
A full list of configuration parameters and their default values is shown in Appendix: Configuration parameters.\nFor more details about the helm command and parameters, please execute helm --help and helm install --help.\nCreate OUDSM instances You can create OUDSM instances using one of the following methods:\n Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create an oudsm-values-override.yaml file as follows:\nimage: repository: \u0026lt;image_location\u0026gt; tag: \u0026lt;image_tag\u0026gt; pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oudsm: adminUser: weblogic adminPass: \u0026lt;password\u0026gt; persistence: type: filesystem filesystem: hostPath: path: \u0026lt;persistent_volume\u0026gt;/oudsm_user_projects For example:\nimage: repository: container-registry.oracle.com/middleware/oudsm_cpu tag: 12.2.1.4-jdk8-ol7-220223.2053 pullPolicy: IfNotPresent imagePullSecrets: - name: orclcred oudsm: adminUser: weblogic adminPass: \u0026lt;password\u0026gt; persistence: type: filesystem filesystem: hostPath: path: /scratch/shared/oudsm_user_projects The following caveats exist:\n Replace \u0026lt;password\u0026gt; with the relevant password.\n If you are not using Oracle Container Registry or your own container registry for your OUDSM container image, then you can remove the following:\nimagePullSecrets: - name: orclcred If using NFS for your persistent volume then change the persistence section as follows:\n persistence: type: networkstorage networkstorage: nfs: path: \u0026lt;persistent_volume\u0026gt;/oudsm_user_projects server: \u0026lt;NFS IP address\u0026gt; Run the following command to deploy OUDSM:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --values oudsm-values-override.yaml \\ \u0026lt;release_name\u0026gt; oudsm For example:\n$ helm install --namespace oudsmns \\ --values oudsm-values-override.yaml \\ oudsm oudsm Check the OUDSM deployment as per Verify the OUDSM deployment.\n Using --set argument Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Run the following command to create an OUDSM instance:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --set oudsm.adminUser=weblogic,oudsm.adminPass=\u0026lt;password\u0026gt;,persistence.filesystem.hostPath.path=\u0026lt;persistent_volume\u0026gt;/oudsm_user_projects,image.repository=\u0026lt;image_location\u0026gt;,image.tag=\u0026lt;image_tag\u0026gt; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ \u0026lt;release_name\u0026gt; oudsm For example:\n$ helm install --namespace oudsmns \\ --set oudsm.adminUser=weblogic,oudsm.adminPass=\u0026lt;password\u0026gt;,persistence.filesystem.hostPath.path=/scratch/shared/oudsm_user_projects,image.repository=container-registry.oracle.com/middleware/oudsm_cpu,image.tag=12.2.1.4-jdk8-ol7-220223.2053 \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ oudsm oudsm The following caveats exist:\n Replace \u0026lt;password\u0026gt; with the relevant password. If you are not using Oracle Container Registry or your own container registry for your OUDSM container image, then you can remove the following: --set imagePullSecrets[0].name=\u0026quot;orclcred\u0026quot; If using NFS for your persistent volume then use persistence.networkstorage.nfs.path=\u0026lt;persistent_volume\u0026gt;/oudsm_user_projects,persistence.networkstorage.nfs.server=\u0026lt;NFS IP address\u0026gt;.
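Whichever method you use, you can optionally confirm which overrides were applied to the release using the standard helm get values command (this is a generic Helm facility rather than a step required by this guide):\n$ helm get values --namespace \u0026lt;namespace\u0026gt; \u0026lt;release_name\u0026gt; For example:\n$ helm get values --namespace oudsmns oudsm Add --all to also display the chart default values.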
Check the OUDSM deployment as per Verify the OUDSM deployment.\n Helm command output In all the examples above, the following output is shown following a successful execution of the helm install command.\nNAME: oudsm LAST DEPLOYED: Mon Mar 21 12:21:06 2022 NAMESPACE: oudsmns STATUS: deployed REVISION: 1 TEST SUITE: None Verify the OUDSM deployment Run the following command to verify the OUDSM deployment:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pod,service,secret,pv,pvc,ingress -o wide For example:\n$ kubectl --namespace oudsmns get pod,service,secret,pv,pvc,ingress -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 73m 10.244.0.19 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oudsm-1 ClusterIP 10.96.108.200 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 73m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1 service/oudsm-lbr ClusterIP 10.96.41.201 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 73m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm NAME TYPE DATA AGE secret/default-token-w4jft kubernetes.io/service-account-token 3 3h15m secret/orclcred kubernetes.io/dockerconfigjson 1 3h13m secret/oudsm-creds opaque 2 73m secret/oudsm-token-ksr4g kubernetes.io/service-account-token 3 73m secret/sh.helm.release.v1.oudsm.v1 helm.sh/release.v1 1 73m NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/oudsm-pv 30Gi RWX Retain Bound myoudsmns/oudsm-pvc manual 73m Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE persistentvolumeclaim/oudsm-pvc Bound oudsm-pv 30Gi RWX manual 73m Filesystem NAME HOSTS ADDRESS PORTS AGE ingress.extensions/oudsm-ingress-nginx oudsm-1,oudsm-2,oudsm + 1 more... 100.102.51.230 80 73m Note: It will take several minutes before all the services listed above show. While an oudsm pod shows 0/1 in the READY column, the pod is started but the OUDSM server associated with it is still starting. While the pod is starting you can check the startup status in the pod logs by running the following command:\n$ kubectl logs oudsm-1 -n oudsmns Note: If the OUDSM deployment fails, additionally refer to Troubleshooting for instructions on how to describe the failing pod(s). Once the problem is identified, follow Undeploy an OUDSM deployment to clean down the deployment before deploying again.\nKubernetes Objects Kubernetes objects created by the Helm chart are detailed in the table below:\n Type Name Example Name Purpose Service Account \u0026lt;deployment/release name\u0026gt; oudsm Kubernetes Service Account for the Helm Chart deployment Secret \u0026lt;deployment/release name\u0026gt;-creds oudsm-creds Secret object for Oracle Unified Directory Services Manager related critical values like passwords Persistent Volume \u0026lt;deployment/release name\u0026gt;-pv oudsm-pv Persistent Volume for user_projects mount. Persistent Volume Claim \u0026lt;deployment/release name\u0026gt;-pvc oudsm-pvc Persistent Volume Claim for user_projects mount.
Pod \u0026lt;deployment/release name\u0026gt;-N oudsm-1, oudsm-2, \u0026hellip; Pod(s)/Container(s) for Oracle Unified Directory Services Manager Instances Service \u0026lt;deployment/release name\u0026gt;-N oudsm-1, oudsm-2, \u0026hellip; Service(s) for HTTP and HTTPS interfaces from Oracle Unified Directory Services Manager instance \u0026lt;deployment/release name\u0026gt;-N Ingress \u0026lt;deployment/release name\u0026gt;-ingress-nginx oudsm-ingress-nginx Ingress Rules for HTTP and HTTPS interfaces. In the table above, the Example Name for each Object is based on the value \u0026lsquo;oudsm\u0026rsquo; as the deployment/release name for the Helm chart installation. Ingress Configuration With an OUDSM instance now deployed you are now ready to configure an ingress controller to direct traffic to OUDSM as per Configure an ingress for an OUDSM.\nUndeploy an OUDSM deployment Delete the OUDSM deployment Find the deployment release name:\n$ helm --namespace \u0026lt;namespace\u0026gt; list For example:\n$ helm --namespace oudsmns list The output will look similar to the following:\nNAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION oudsm oudsmns 2 2022-03-21 16:46:34.05531056 +0000 UTC deployed oudsm-0.1 12.2.1.4.0 Delete the deployment using the following command:\n$ helm uninstall --namespace \u0026lt;namespace\u0026gt; \u0026lt;release\u0026gt; For example:\n$ helm uninstall --namespace oudsmns oudsm release \u0026#34;oudsm\u0026#34; uninstalled Delete the persistent volume contents Delete the contents of the oudsm_user_projects directory in the persistent volume:\n$ cd \u0026lt;persistent_volume\u0026gt;/oudsm_user_projects $ rm -rf * For example:\n$ cd /scratch/shared/oudsm_user_projects $ rm -rf * Appendix: Configuration Parameters The following table lists the configurable parameters of the \u0026lsquo;oudsm\u0026rsquo; chart and their default values.\n Parameter Description Default Value replicaCount Number of Oracle Unified Directory Services Manager instances/pods/services to be created 1 restartPolicyName restartPolicy to be configured for each POD containing Oracle Unified Directory Services Manager instance OnFailure image.repository Oracle Unified Directory Services Manager Image Registry/Repository and name. Based on this, image parameter would be configured for Oracle Unified Directory Services Manager pods/containers oracle/oudsm image.tag Oracle Unified Directory Services Manager Image Tag. Based on this, image parameter would be configured for Oracle Unified Directory Services Manager pods/containers 12.2.1.4.0 image.pullPolicy policy to pull the image IfnotPresent imagePullSecrets.name name of Secret resource containing private registry credentials regcred nameOverride override the fullname with this name fullnameOverride Overrides the fullname with the provided string serviceAccount.create Specifies whether a service account should be created true serviceAccount.name If not set and create is true, a name is generated using the fullname template oudsm-\u0026lt; fullname \u0026gt;-token-\u0026lt; randomalphanum \u0026gt; podSecurityContext Security context policies to add to the controller pod securityContext Security context policies to add by default service.type type of controller service to create ClusterIP nodeSelector node labels for pod assignment tolerations node taints to tolerate affinity node/pod affinities ingress.enabled true ingress.type Supported value: nginx nginx ingress.host Hostname to be used with Ingress Rules. 
If not set, hostname would be configured according to fullname. Hosts would be configured as \u0026lt; fullname \u0026gt;-http.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-0.\u0026lt; domain \u0026gt;, \u0026lt; fullname \u0026gt;-http-1.\u0026lt; domain \u0026gt;, etc. ingress.domain Domain name to be used with Ingress Rules. In ingress rules, hosts would be configured as \u0026lt; host \u0026gt;.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-0.\u0026lt; domain \u0026gt;, \u0026lt; host \u0026gt;-1.\u0026lt; domain \u0026gt;, etc. ingress.backendPort http ingress.nginxAnnotations { kubernetes.io/ingress.class: \u0026ldquo;nginx\u0026quot;nginx.ingress.kubernetes.io/affinity-mode: \u0026ldquo;persistent\u0026rdquo; nginx.ingress.kubernetes.io/affinity: \u0026ldquo;cookie\u0026rdquo; } ingress.ingress.tlsSecret Secret name to use an already created TLS Secret. If such secret is not provided, one would be created with name \u0026lt; fullname \u0026gt;-tls-cert. If the TLS Secret is in different namespace, name can be mentioned as \u0026lt; namespace \u0026gt;/\u0026lt; tlsSecretName \u0026gt; ingress.certCN Subject\u0026rsquo;s common name (cn) for SelfSigned Cert. \u0026lt; fullname \u0026gt; ingress.certValidityDays Validity of Self-Signed Cert in days 365 secret.enabled If enabled it will use the secret created with base64 encoding. if value is false, secret would not be used and input values (through \u0026ndash;set, \u0026ndash;values, etc.) would be used while creation of pods. true secret.name secret name to use an already created Secret oudsm-\u0026lt; fullname \u0026gt;-creds secret.type Specifies the type of the secret Opaque persistence.enabled If enabled, it will use the persistent volume. if value is false, PV and PVC would not be used and pods would be using the default emptyDir mount volume. true persistence.pvname pvname to use an already created Persistent Volume , If blank will use the default name oudsm-\u0026lt; fullname \u0026gt;-pv persistence.pvcname pvcname to use an already created Persistent Volume Claim , If blank will use default name oudsm-\u0026lt; fullname \u0026gt;-pvc persistence.type supported values: either filesystem or networkstorage or custom filesystem persistence.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. /scratch/shared/oudsm_user_projects persistence.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oudsm_user_projects persistence.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 persistence.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object persistence.accessMode Specifies the access mode of the location provided ReadWriteMany persistence.size Specifies the size of the storage 10Gi persistence.storageClass Specifies the storageclass of the persistence volume. empty persistence.annotations specifies any annotations that will be used { } oudsm.adminUser Weblogic Administration User weblogic oudsm.adminPass Password for Weblogic Administration User oudsm.startupTime Expected startup time. After specified seconds readinessProbe would start 900 oudsm.livenessProbeInitialDelay Paramter to decide livenessProbe initialDelaySeconds 1200 elk.elasticsearch.enabled If enabled it will create the elastic search statefulset deployment false elk.elasticsearch.image.repository Elastic Search Image name/Registry/Repository . 
Based on this elastic search instances will be created docker.elastic.co/elasticsearch/elasticsearch elk.elasticsearch.image.tag Elastic Search Image tag .Based on this, image parameter would be configured for Elastic Search pods/instances 6.4.3 elk.elasticsearch.image.pullPolicy policy to pull the image IfnotPresent elk.elasticsearch.esreplicas Number of Elastic search Instances will be created 3 elk.elasticsearch.minimumMasterNodes The value for discovery.zen.minimum_master_nodes. Should be set to (esreplicas / 2) + 1. 2 elk.elasticsearch.esJAVAOpts Java options for Elasticsearch. This is where you should configure the jvm heap size -Xms512m -Xmx512m elk.elasticsearch.sysctlVmMaxMapCount Sets the sysctl vm.max_map_count needed for Elasticsearch 262144 elk.elasticsearch.resources.requests.cpu cpu resources requested for the elastic search 100m elk.elasticsearch.resources.limits.cpu total cpu limits that are configures for the elastic search 1000m elk.elasticsearch.esService.type Type of Service to be created for elastic search ClusterIP elk.elasticsearch.esService.lbrtype Type of load balancer Service to be created for elastic search ClusterIP elk.kibana.enabled If enabled it will create a kibana deployment false elk.kibana.image.repository Kibana Image Registry/Repository and name. Based on this Kibana instance will be created docker.elastic.co/kibana/kibana elk.kibana.image.tag Kibana Image tag. Based on this, Image parameter would be configured. 6.4.3 elk.kibana.image.pullPolicy policy to pull the image IfnotPresent elk.kibana.kibanaReplicas Number of Kibana instances will be created 1 elk.kibana.service.tye Type of service to be created NodePort elk.kibana.service.targetPort Port on which the kibana will be accessed 5601 elk.kibana.service.nodePort nodePort is the port on which kibana service will be accessed from outside 31119 elk.logstash.enabled If enabled it will create a logstash deployment false elk.logstash.image.repository logstash Image Registry/Repository and name. Based on this logstash instance will be created logstash elk.logstash.image.tag logstash Image tag. Based on this, Image parameter would be configured. 6.6.0 elk.logstash.image.pullPolicy policy to pull the image IfnotPresent elk.logstash.containerPort Port on which the logstash container will be running 5044 elk.logstash.service.tye Type of service to be created NodePort elk.logstash.service.targetPort Port on which the logstash will be accessed 9600 elk.logstash.service.nodePort nodePort is the port on which logstash service will be accessed from outside 32222 elk.logstash.logstashConfigMap Provide the configmap name which is already created with the logstash conf. if empty default logstash configmap will be created and used elk.elkPorts.rest Port for REST 9200 elk.elkPorts.internode port used for communication between the nodes 9300 elk.busybox.image busy box image name. Used for initcontianers busybox elk.elkVolume.enabled If enabled, it will use the persistent volume. if value is false, PV and pods would be using the default emptyDir mount volume. true elk.elkVolume.pvname pvname to use an already created Persistent Volume , If blank will use the default name oudsm-\u0026lt; fullname \u0026gt;-espv elk.elkVolume.type supported values: either filesystem or networkstorage or custom filesystem elk.elkVolume.filesystem.hostPath.path The path location mentioned should be created and accessible from the local host provided with necessary privileges for the user. 
/scratch/shared/oud_elk/data elk.elkVolume.networkstorage.nfs.path Path of NFS Share location /scratch/shared/oudsm_elk/data elk.elkVolume.networkstorage.nfs.server IP or hostname of NFS Server 0.0.0.0 elk.elkVolume.custom.* Based on values/data, YAML content would be included in PersistenceVolume Object elk.elkVolume.accessMode Specifies the access mode of the location provided ReadWriteMany elk.elkVolume.size Specifies the size of the storage 20Gi elk.elkVolume.storageClass Specifies the storageclass of the persistence volume. elk elk.elkVolume.annotations specifies any annotations that will be used { } " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/manage-oig-domains/logging-and-visualization/", + "title": "Logging and visualization", + "tags": [], + "description": "Describes the steps for logging and visualization with Elasticsearch and Kibana.", + "content": "After the OIG domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana.\nInstall Elasticsearch and Kibana If your domain namespace is anything other than oigns, edit the $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml and change all instances of oigns to your domain namespace.\n Create a Kubernetes secret to access the elasticsearch and kibana container images:\nNote: You must first have a user account on hub.docker.com.\n$ kubectl create secret docker-registry \u0026#34;dockercred\u0026#34; --docker-server=\u0026#34;https://index.docker.io/v1/\u0026#34; --docker-username=\u0026#34;\u0026lt;docker_username\u0026gt;\u0026#34; --docker-password=\u0026lt;password\u0026gt; --docker-email=\u0026lt;docker_email_credentials\u0026gt; --namespace=\u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create secret docker-registry \u0026quot;dockercred\u0026quot; --docker-server=\u0026quot;https://index.docker.io/v1/\u0026quot; --docker-username=\u0026quot;username\u0026quot; --docker-password=\u0026lt;password\u0026gt; --docker-email=user@example.com --namespace=oigns The output will look similar to the following:\nsecret/dockercred created Create the Kubernetes resource using the following command:\n$ kubectl apply -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml The output will look similar to the following:\ndeployment.apps/elasticsearch created service/elasticsearch created deployment.apps/kibana created service/kibana created Run the following command to ensure Elasticsearch is used by the operator:\n$ helm get values --all weblogic-kubernetes-operator -n opns The output will look similar to the following:\nCOMPUTED VALUES: clusterSizePaddingValidationEnabled: true domainNamespaceLabelSelector: weblogic-operator=enabled domainNamespaceSelectionStrategy: LabelSelector domainNamespaces: - default elasticSearchHost: elasticsearch.default.svc.cluster.local elasticSearchPort: 9200 elkIntegrationEnabled: true enableClusterRoleBinding: true externalDebugHttpPort: 30999 externalRestEnabled: false externalRestHttpsPort: 31001 externalServiceNameSuffix: -ext image: ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 imagePullPolicy: IfNotPresent internalDebugHttpPort: 30999 introspectorJobNameSuffix: -introspector javaLoggingFileCount: 10 javaLoggingFileSizeLimit: 20000000 javaLoggingLevel: FINE logStashImage: logstash:6.6.0 remoteDebugNodePortEnabled: false serviceAccount: op-sa suspendOnDebugStartup: false To check that Elasticsearch and Kibana are deployed in the Kubernetes cluster, run the following command:\n$ kubectl get 
pods -n \u0026lt;namespace\u0026gt; | grep \u0026#39;elasticsearch\\|kibana\u0026#39; For example:\n$ kubectl get pods -n oigns | grep \u0026#39;elasticsearch\\|kibana\u0026#39; The output will look similar to the following:\nelasticsearch-857bd5ff6b-tvqdn 1/1 Running 0 2m9s kibana-594465687d-zc2rt 1/1 Running 0 2m9s Create the logstash pod OIG Server logs can be pushed to the Elasticsearch server using the logstash pod. The logstash pod needs access to the persistent volume of the OIG domain created previously, for example governancedomain-domain-pv. The steps to create the logstash pod are as follows:\n Obtain the OIG domain persistent volume details:\n$ kubectl get pv -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pv -n oigns The output will look similar to the following:\nNAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE governancedomain-domain-pv 10Gi RWX Retain Bound oigns/governancedomain-domain-pvc governancedomain-oim-storage-class 28h Make note of the CLAIM value, for example in this case governancedomain-domain-pvc\n Run the following command to get the mountPath of your domain:\n$ kubectl describe domains \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; | grep \u0026#34;Mount Path\u0026#34; For example:\n$ kubectl describe domains governancedomain -n oigns | grep \u0026#34;Mount Path\u0026#34; The output will look similar to the following:\nMount Path: /u01/oracle/user_projects/domains Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory and create a logstash.yaml file as follows. Change the claimName and mountPath values to match the values returned in the previous commands. Change namespace to your domain namespace e.g. oigns:\napiVersion: apps/v1 kind: Deployment metadata: name: logstash-wls namespace: oigns spec: selector: matchLabels: k8s-app: logstash-wls template: # create pods using pod definition in this template metadata: labels: k8s-app: logstash-wls spec: volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: governancedomain-domain-pvc - name: shared-logs emptyDir: {} imagePullSecrets: - name: dockercred containers: - name: logstash image: logstash:6.6.0 command: [\u0026quot;/bin/sh\u0026quot;] args: [\u0026quot;/usr/share/logstash/bin/logstash\u0026quot;, \u0026quot;-f\u0026quot;, \u0026quot;/u01/oracle/user_projects/domains/logstash/logstash.conf\u0026quot;] imagePullPolicy: IfNotPresent volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume - name: shared-logs mountPath: /shared-logs ports: - containerPort: 5044 name: logstash In the persistent volume directory that corresponds to the mountPath /u01/oracle/user_projects/domains, create a logstash directory. For example:\n$ mkdir -p /scratch/shared/governancedomainpv/logstash Create a logstash.conf in the newly created logstash directory that contains the following. Make sure the paths correspond to your mountPath and domain name.
Also, if your namespace is anything other than oigns change \u0026quot;elasticsearch.oigns.svc.cluster.local:9200\u0026quot; to \u0026quot;elasticsearch.\u0026lt;namespace\u0026gt;.svc.cluster.local:9200\u0026quot;::\ninput { file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain/AdminServer*.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain/soa_server*.log\u0026quot; tags =\u0026gt; \u0026quot;soaserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/governancedomain/oim_server*.log\u0026quot; tags =\u0026gt; \u0026quot;Oimserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/AdminServer/logs/AdminServer-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/soa_server*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Soa_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/oim_server*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Oimserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/governancedomain/servers/**/logs/access*.log\u0026quot; tags =\u0026gt; \u0026quot;Access_logs\u0026quot; start_position =\u0026gt; beginning } } filter { grok { match =\u0026gt; [ \u0026quot;message\u0026quot;, \u0026quot;\u0026lt;%{DATA:log_timestamp}\u0026gt; \u0026lt;%{WORD:log_level}\u0026gt; \u0026lt;%{WORD:thread}\u0026gt; \u0026lt;%{HOSTNAME:hostname}\u0026gt; \u0026lt;%{HOSTNAME:servername}\u0026gt; \u0026lt;%{DATA:timer}\u0026gt; \u0026lt;\u0026lt;%{DATA:kernel}\u0026gt;\u0026gt; \u0026lt;\u0026gt; \u0026lt;%{DATA:uuid}\u0026gt; \u0026lt;%{NUMBER:timestamp}\u0026gt; \u0026lt;%{DATA:misc}\u0026gt; \u0026lt;%{DATA:log_number}\u0026gt; \u0026lt;%{DATA:log_message}\u0026gt;\u0026quot; ] } if \u0026quot;_grokparsefailure\u0026quot; in [tags] { mutate { remove_tag =\u0026gt; [ \u0026quot;_grokparsefailure\u0026quot; ] } } } output { elasticsearch { hosts =\u0026gt; [\u0026quot;elasticsearch.oigns.svc.cluster.local:9200\u0026quot;] } } Deploy the logstash pod by executing the following command:\n$ kubectl create -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml The output will look similar to the following:\ndeployment.apps/logstash-wls created Run the following command to check the logstash pod is created correctly:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE elasticsearch-678ff4fb5-89rpf 1/1 Running 0 13m governancedomain-adminserver 1/1 Running 0 90m governancedomain-create-fmw-infra-sample-domain-job-8cww8 0/1 Completed 0 25h governancedomain-oim-server1 1/1 Running 0 87m governancedomain-soa-server1 1/1 Running 0 87m kibana-589466bb89-k8wdr 1/1 Running 0 13m logstash-wls-f448b44c8-92l27 1/1 Running 0 7s Verify and access the Kibana console Check if the indices are created correctly in the elasticsearch pod shown above:\n$ kubectl exec -it 
\u0026lt;elasticsearch-pod\u0026gt; -n \u0026lt;namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it elasticsearch-678ff4fb5-89rpf -n oigns -- /bin/bash This will take you into a bash shell in the elasticsearch pod:\n[root@elasticsearch-678ff4fb5-89rpf elasticsearch]# In the elasticsearch bash shell run the following to check the indices:\n[root@elasticsearch-678ff4fb5-89rpf elasticsearch]# curl -i \u0026#34;127.0.0.1:9200/_cat/indices?v\u0026#34; The output will look similar to the following:\nHTTP/1.1 200 OK content-type: text/plain; charset=UTF-8 content-length: 580 health status index uuid pri rep docs.count docs.deleted store.size pri.store.size yellow open logstash-2022.03.10 7oXXCureSWKwNY0626Szeg 5 1 46887 0 11.7mb 11.7mb green open .kibana_task_manager alZtnv2WRy6Y4iSRIbmCrQ 1 0 2 0 12.6kb 12.6kb green open .kibana_1 JeZKrO4fS_GnRL92qRmQDQ 1 0 2 0 7.6kb 7.6kb Exit the bash shell by typing exit.\n Find the Kibana port by running the following command:\n$ kubectl get svc -n \u0026lt;namespace\u0026gt; | grep kibana For example:\n$ kubectl get svc -n oigns | grep kibana The output will look similar to the following:\nkibana NodePort 10.111.224.230 \u0026lt;none\u0026gt; 5601:31490/TCP 11m In the example above the Kibana port is 31490.\n Access the Kibana console with http://${MASTERNODE-HOSTNAME}:${KIBANA-PORT}/app/kibana.\n Click on Dashboard in the left hand Navigation Menu.\n In the Create index pattern page enter logstash* and click Next Step.\n From the Time Filter field name drop down menu select @timestamp and click Create index pattern.\n Once the index pattern is created click on Discover in the navigation menu to view the logs.\n For more details on how to use the Kibana console see the Kibana Guide\nCleanup To clean up the Elasticsearch and Kibana install:\n Run the following command to delete logstash:\n$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml The output will look similar to the following:\ndeployment.apps \u0026quot;logstash-wls\u0026quot; deleted Run the following command to delete Elasticsearch and Kibana:\n$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml The output will look similar to the following:\ndeployment.apps \u0026quot;elasticsearch\u0026quot; deleted service \u0026quot;elasticsearch\u0026quot; deleted deployment.apps \u0026quot;kibana\u0026quot; deleted service \u0026quot;kibana\u0026quot; deleted " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/configure-ingress/", + "title": "Configure an Ingress for an OAM domain", + "tags": [], + "description": "This document provides steps to configure an Ingress to direct traffic to the OAM domain.", + "content": "Setting up an ingress for NGINX for the OAM Domain The instructions below explain how to set up NGINX as an ingress for the OAM domain with SSL termination.\nNote: All the steps below should be performed on the master node.\n Generate a SSL Certificate Install NGINX Create an Ingress for the Domain Verify that you can access the domain URL Generate a SSL Certificate Generate a private key and certificate signing request (CSR) using a tool of your choice. 
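For example, a minimal sketch using openssl, where the CN value is a placeholder that should match the hostname that will be used to access the ingress:
$ mkdir <workdir>/ssl
$ cd <workdir>/ssl
# Generate a 2048-bit private key (tls.key) and a certificate signing request (tls.csr)
$ openssl req -new -nodes -newkey rsa:2048 -keyout tls.key -out tls.csr -subj "/CN=<nginx-hostname>"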
Send the CSR to your certificate authority (CA) to generate the certificate.\nIf you want to use a certificate for testing purposes you can generate a self signed certificate using openssl:\n$ mkdir \u0026lt;workdir\u0026gt;/ssl $ cd \u0026lt;workdir\u0026gt;/ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \u0026#34;/CN=\u0026lt;nginx-hostname\u0026gt;\u0026#34; For example:\n$ mkdir /scratch/OAMK8S/ssl $ cd /scratch/OAMK8S/ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \u0026#34;/CN=masternode.example.com\u0026#34; Note: The CN should match the host.domain of the master node in order to prevent hostname problems during certificate verification.\nThe output will look similar to the following:\nGenerating a 2048 bit RSA private key ..........................................+++ .......................................................................................................+++ writing new private key to 'tls.key' ----- Create a secret for SSL by running the following command:\n$ kubectl -n oamns create secret tls \u0026lt;domain_uid\u0026gt;-tls-cert --key \u0026lt;workdir\u0026gt;/tls.key --cert \u0026lt;workdir\u0026gt;/tls.crt For example:\n$ kubectl -n oamns create secret tls accessdomain-tls-cert --key /scratch/OAMK8S/ssl/tls.key --cert /scratch/OAMK8S/ssl/tls.crt The output will look similar to the following:\nsecret/accessdomain-tls-cert created Install NGINX Use helm to install NGINX.\n Add the helm chart repository for NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. ⎈ Happy Helming!⎈ Install NGINX using helm If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort parameter.\nIf you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer parameter. This instructs the Managed Service to setup a Load Balancer to direct traffic to the NGINX ingress.\n To install NGINX use the following helm command depending on if you are using NodePort or LoadBalancer:\na) Using NodePort\n$ helm install nginx-ingress -n \u0026lt;domain_namespace\u0026gt; --set controller.extraArgs.default-ssl-certificate=\u0026lt;domain_namespace\u0026gt;/\u0026lt;ssl_secret\u0026gt; --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx For example:\n$ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx The output will look similar to the following:\nNAME: nginx-ingress LAST DEPLOYED: Mon Mar 7 13:57:21 2022 NAMESPACE: oamns STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The nginx-ingress controller has been installed. 
Get the application URL by running these commands: export HTTP_NODE_PORT=$(kubectl --namespace oamns get services -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; nginx-ingress-controller) export HTTPS_NODE_PORT=$(kubectl --namespace oamns get services -o jsonpath=\u0026quot;{.spec.ports[1].nodePort}\u0026quot; nginx-ingress-controller) export NODE_IP=$(kubectl --namespace oamns get nodes -o jsonpath=\u0026quot;{.items[0].status.addresses[1].address}\u0026quot;) echo \u0026quot;Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP.\u0026quot; echo \u0026quot;Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS.\u0026quot; An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls b) Using LoadBalancer\n$ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx The output will look similar to the following:\n$ helm install nginx-ingress -n oamns --set controller.extraArgs.default-ssl-certificate=oamns/accessdomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx NAME: nginx-ingress LAST DEPLOYED: Mon Mar 7 13:57:21 2022 NAMESPACE: nginxssl STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. It may take a few minutes for the LoadBalancer IP to be available. You can watch the status by running 'kubectl --namespace oamns get services -o wide -w nginx-ingress-ingress-nginx-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Create an Ingress for the Domain Navigate to the following directory:\n$ cd $WORKDIR/kubernetes/charts/ingress-per-domain Edit the values.yaml and change the domainUID: parameter to match your domainUID, for example domainUID: accessdomain. The file should look as follows:\n# Load balancer type. 
Supported values are: NGINX type: NGINX # Type of Configuration Supported Values are : SSL and NONSSL sslType: SSL # domainType Supported values are soa,osb and soaosb. #WLS domain as backend to the load balancer wlsDomain: domainUID: accessdomain adminServerName: AdminServer adminServerPort: 7001 adminServerSSLPort: oamClusterName: oam_cluster oamManagedServerPort: 14100 oamManagedServerSSLPort: policyClusterName: policy_cluster policyManagedServerPort: 15100 policyManagedServerSSLPort: Run the following helm command to install the ingress:\n$ cd $WORKDIR $ helm install oam-nginx kubernetes/charts/ingress-per-domain --namespace \u0026lt;domain_namespace\u0026gt; --values kubernetes/charts/ingress-per-domain/values.yaml For example:\n$ cd $WORKDIR $ helm install oam-nginx kubernetes/charts/ingress-per-domain --namespace oamns --values kubernetes/charts/ingress-per-domain/values.yaml The output will look similar to the following:\nNAME: oam-nginx LAST DEPLOYED: Mon Mar 7 14:01:01 2022 NAMESPACE: oamns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl get ing -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get ing -n oamns The output will look similar to the following:\nNAME CLASS HOSTS ADDRESS PORTS AGE access-ingress \u0026lt;none\u0026gt; * 10.101.132.251 80 2m53s Find the node port of NGINX using the following command:\n$ kubectl --namespace \u0026lt;domain_namespace\u0026gt; get services -o jsonpath=\u0026#34;{.spec.ports[1].nodePort}\u0026#34; nginx-ingress-ingress-nginx-controller For example:\n$ kubectl --namespace oamns get services -o jsonpath=\u0026#34;{.spec.ports[1].nodePort}\u0026#34; nginx-ingress-ingress-nginx-controller The output will look similar to the following:\n31051 Run the following command to check the ingress:\n$ kubectl describe ing access-ingress -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe ing access-ingress -n oamns The output will look similar to the following:\nName: access-ingress Namespace: oamns Address: 10.101.132.251 Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * /console accessdomain-adminserver:7001 (10.244.6.63:7001) /rreg/rreg accessdomain-adminserver:7001 (10.244.6.63:7001) /em accessdomain-adminserver:7001 (10.244.6.63:7001) /oamconsole accessdomain-adminserver:7001 (10.244.6.63:7001) /dms accessdomain-adminserver:7001 (10.244.6.63:7001) /oam/services/rest accessdomain-adminserver:7001 (10.244.6.63:7001) /iam/admin/config accessdomain-adminserver:7001 (10.244.6.63:7001) /iam/admin/diag accessdomain-adminserver:7001 (10.244.6.63:7001) /iam/access accessdomain-cluster-oam-cluster:14100 (10.244.5.12:14100,10.244.6.64:14100) /oam/admin/api accessdomain-adminserver:7001 (10.244.6.63:7001) /oam/services/rest/access/api accessdomain-cluster-oam-cluster:14100 (10.244.5.12:14100,10.244.6.64:14100) /access accessdomain-cluster-policy-cluster:15100 (10.244.5.13:15100,10.244.6.65:15100) / accessdomain-cluster-oam-cluster:14100 (10.244.5.12:14100,10.244.6.64:14100) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: oam-nginx meta.helm.sh/release-namespace: oamns nginx.ingress.kubernetes.io/configuration-snippet: more_set_input_headers \u0026quot;X-Forwarded-Proto: https\u0026quot;; more_set_input_headers \u0026quot;WL-Proxy-SSL: true\u0026quot;; nginx.ingress.kubernetes.io/enable-access-log: 
false nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 6m22s (x2 over 6m31s) nginx-ingress-controller Scheduled for sync To confirm that the new ingress is successfully routing to the domain\u0026rsquo;s server pods, run the following command to send a request to the URL for the WebLogic ReadyApp framework:\n$ curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready For example:\na) For NodePort\n$ curl -v -k https://masternode.example.com:31051/weblogic/ready b) For LoadBalancer:\n$ curl -v -k https://loadbalancer.example.com/weblogic/ready The output will look similar to the following:\n* Trying 12.345.67.89... * Connected to 12.345.67.89 (12.345.67.89) port 31051 (#0) * Initializing NSS with certpath: sql:/etc/pki/nssdb * skipping SSL peer certificate verification * SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 * Server certificate: * subject: CN=masternode.example.com * start date: Nov 01 14:31:07 2021 GMT * expire date: Nov 01 14:31:07 2022 GMT * common name: masternode.example.com * issuer: CN=masternode.example.com \u0026gt; GET /weblogic/ready HTTP/1.1 \u0026gt; User-Agent: curl/7.29.0 \u0026gt; Host: masternode.example.com:31051 \u0026gt; Accept: */* \u0026gt; \u0026lt; HTTP/1.1 200 OK \u0026lt; Date: Mon, 01 Nov 2021 15:06:12 GMT \u0026lt; Content-Length: 0 \u0026lt; Connection: keep-alive \u0026lt; Strict-Transport-Security: max-age=15724800; includeSubDomains \u0026lt; * Connection #0 to host 12.345.67.89 left intact Verify that you can access the domain URL After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 31051) as per Validate Domain URLs \n" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/configure-ingress/", + "title": "Configure an ingress for an OIG domain", + "tags": [], + "description": "This document provides steps to configure an Ingress to direct traffic to the OIG domain.", + "content": "Choose one of the following supported methods to configure an Ingress to direct traffic for your OIG domain.\n a. Using an Ingress with NGINX (non-SSL) Steps to set up an Ingress for NGINX to direct traffic to the OIG domain (non-SSL).\n b. Using an Ingress with NGINX (SSL) Steps to set up an Ingress for NGINX to direct traffic to the OIG domain using SSL.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oid/configure-ingress/", + "title": "Configure an Ingress for OID", + "tags": [], + "description": "This document provides steps to configure an ingress controller to direct traffic to OID.", + "content": " Introduction\n Install NGINX\na. Configure the repository\nb. Create a namespace\nc. Install NGINX using helm\n Access to interfaces through ingress\na. Using LDAP utilities\nb. Validate access using LDAP utilities\nc. Validate OID using Oracle Directory Services Manager\n Introduction The instructions below explain how to set up NGINX as an ingress for OID.\nBy default the ingress configuration only supports HTTP and HTTPS ports. 
To allow LDAP and LDAPS communication over TCP, configuration is required at the ingress controller/implementation level.\nInstall NGINX Use Helm to install NGINX.\nConfigure the repository Add the Helm chart repository for installing NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. Happy Helming! Create a namespace Create a Kubernetes namespace for NGINX:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace mynginx The output will look similar to the following:\nnamespace/mynginx created Install NGINX using helm Create a $WORKDIR/kubernetes/helm/nginx-ingress-values-override.yaml that contains the following:\nNote: The configuration below:\n Assumes you have oid installed with value oid as a deployment/release name in the namespace oidns. If using a different deployment name and/or namespace change appropriately. Deploys an ingress using NodePort. If using an external loadbalancer, change the configuration accordingly. For more details about NGINX configuration see: NGINX Ingress Controller. # Configuration for additional TCP ports to be exposed through Ingress # Format for each port would be like: # \u0026lt;PortNumber\u0026gt;: \u0026lt;Namespace\u0026gt;/\u0026lt;Service\u0026gt; tcp: # Map 1389 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAP Port 3060: oidns/oid-lbr-ldap:3060 # Map 1636 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAPS Port 3131: oidns/oid-lbr-ldap:3131 3061: oidns/oidhost1:3060 3130: oidns/oidhost1:3131 3062: oidns/oidhost2:3060 3132: oidns/oidhost2:3131 3063: oidns/oidhost3:3060 3133: oidns/oidhost3:3131 3064: oidns/oidhost4:3060 3134: oidns/oidhost4:3131 3065: oidns/oidhost5:3060 3135: oidns/oidhost5:3131 controller: admissionWebhooks: enabled: false extraArgs: # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server. # If this flag is not provided NGINX will use a self-signed certificate. # If the TLS Secret is in different namespace, name can be mentioned as \u0026lt;namespace\u0026gt;/\u0026lt;tlsSecretName\u0026gt; default-ssl-certificate: oidns/oid-tls-cert service: # controller service external IP addresses # externalIPs: # - \u0026lt; External IP Address \u0026gt; # To configure Ingress Controller Service as LoadBalancer type of Service # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service type: NodePort # Configuration for NodePort to be used for Ports exposed through Ingress # If NodePorts are not defied/configured, Node Port would be assigend automatically by Kubernetes # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer. 
# nodePorts: # For HTTP Interface exposed through LoadBalancer/Ingress # http: 30080 # For HTTPS Interface exposed through LoadBalancer/Ingress # https: 30443 #tcp: # For LDAP Interface # 3060: 31389 # For LDAPS Interface # 3131: 31636 To install and configure NGINX Ingress issue the following command:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx \\ --set controller.admissionWebhooks.enabled=false Where:\n lbr-nginx is your deployment name stable/ingress-nginx is the chart reference For example:\n$ helm install --namespace mynginx \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx \\ --set controller.admissionWebhooks.enabled=false The output will look similar to the following:\nNAME: lbr-nginx LAST DEPLOYED: Wed Mar 16 16:49:35 2022 NAMESPACE: mynginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. It may take a few minutes for the LoadBalancer IP to be available. You can watch the status by running 'kubectl --namespace mynginx get services -o wide -w lbr-nginx-ingress-nginx-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: rules: - host: www.example.com http: paths: - backend: serviceName: exampleService servicePort: 80 path: / # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Access to interfaces through ingress To view the ports for the ingress run the following command:\n$ kubectl get all -n mynginx The output will look similar to the following:\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/lbr-nginx-ingress-nginx-controller NodePort 10.97.43.76 \u0026lt;none\u0026gt; 80:30096/TCP,443:31581/TCP,3060:31862/TCP,3061:30271/TCP,3062:31507/TCP,3063:30673/TCP,3064:31562/TCP,3065:30294/TCP,3130:31220/TCP,3131:30127/TCP,3132:31969/TCP,3133:32649/TCP,3134:32042/TCP,3135:30408/TCP 71s NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/lbr-nginx-ingress-nginx-controller 1/1 1 1 71s NAME DESIRED CURRENT READY AGE replicaset.apps/lbr-nginx-ingress-nginx-controller-d5577cfd7 1 1 1 71s Using LDAP utilities To use Oracle LDAP utilities such as ldapbind, ldapsearch, ldapmodify etc. you can either:\n Run the LDAP commands from an OID installation outside the Kubernetes cluster. This requires access to an On-Premises OID installation oustide the Kubernetes cluster.\n Run the LDAP commands from inside the OID Kubernetes pod. 
Execute the following command to enter the pod:\n$ kubectl exec -ti \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; -- bash For example:\n$ kubectl exec -ti oidhost1 -n oidns -- bash This will take you into a bash session in the pod:\n[oracle@oidhost1 oracle]$ Inside the container navigate to /u01/oracle/bin to view the LDAP utilities:\n[oracle@oidhost1 oracle]$ cd /u01/oracle/bin [oracle@oidhost1 bin]$ ls ldap* ldapadd ldapaddmt ldapbind ldapcompare ldapdelete ldapmoddn ldapmodify ldapmodifymt ldapsearch Note: For commands that require an ldif file, copy the file into the \u0026lt;persistent_volume\u0026gt;/oid_user_projects directory:\n$ cp file.ldif \u0026lt;persistent_volume\u0026gt;/oid_user_projects For example:\n$ cp file.ldif /scratch/shared/oid_user_projects The file can then be viewed inside the pod:\n[oracle@oidhost1 bin]$ cd /u01/oracle/user_projects [oracle@oidhost1 user_projects]$ ls *.ldif file.ldif Validate access using LDAP utilities Use an LDAP client such as ldapbind to connect to the OID service. In the example below ldapbind is used from inside the OID Kubernetes pod:\n[oracle@oidhost1 bin]$ ldapbind -D cn=orcladmin -w \u0026lt;password\u0026gt; -h \u0026lt;hostname_ingress\u0026gt; -p 31862 where:\n -p 31862 : is the port mapping to the LDAP port 3060 (3060:31862) from the earlier kubectl command -h \u0026lt;hostname_ingress\u0026gt; : is the hostname where the ingress is running The output should look similar to the following:\nbind successful Validate OID using Oracle Directory Services Manager Access the Oracle WebLogic Server Administration Console and Oracle Directory Services Manager (ODSM) via a browser using the service port which maps to HTTPS port 443. In this example the port is 31581 (443:31581) from the earlier kubectl command. Oracle WebLogic Server Administration Console : https://\u0026lt;hostname_ingress\u0026gt;:31581/console.\nWhen prompted, enter the username and password which correspond to [adminUser] and [adminPassword] passed in Create OID instances.\n Oracle Directory Services Manager : https://\u0026lt;hostname_ingress\u0026gt;:31581/odsm.\nSelect Create a New Connection and, when prompted, enter the following values.\n Server: \u0026lt;hostname_ingress\u0026gt; Port: Ingress mapped port for LDAP or LDAPS, in the example above 3060:31862/TCP or 3131:30127/TCP, namely LDAP:31862, LDAPS:30127 SSL Enabled: select if accessing LDAPS. User Name: cn=orcladmin Password: value of orcladminPassword passed in Create OID instances " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oud/configure-ingress/", + "title": "Configure an Ingress for OUD", + "tags": [], + "description": "This document provides steps to configure an ingress controller to direct traffic to OUD.", + "content": " Introduction\n Install NGINX\na. Configure the repository\nb. Create a namespace\nc. Install NGINX using helm\n Access to interfaces through ingress\na. Changes in /etc/hosts to validate hostname based ingress rules\nb. Using LDAP utilities\nc. Validate access using LDAP\nd. Validate access using HTTPS\n Introduction The instructions below explain how to set up NGINX as an ingress for OUD.\nBy default the ingress configuration only supports HTTP and HTTPS ports.
To allow LDAP and LDAPS communication over TCP, configuration is required at the ingress controller/implementation level.\nInstall NGINX Use Helm to install NGINX.\nConfigure the repository Add the Helm chart repository for installing NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. Happy Helming! Create a namespace Create a Kubernetes namespace for NGINX:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace mynginx The output will look similar to the following:\nnamespace/mynginx created Install NGINX using helm Create a $WORKDIR/kubernetes/helm/nginx-ingress-values-override.yaml that contains the following:\nNote: The configuration below:\n Assumes that you have oud-ds-rs installed with value oud-ds-rs as a deployment/release name in the namespace oudns. If using a different deployment name and/or namespace change appropriately. Deploys an ingress using LoadBalancer. If you prefer to use NodePort, change the configuration accordingly. For more details about NGINX configuration see: NGINX Ingress Controller. # Configuration for additional TCP ports to be exposed through Ingress # Format for each port would be like: # \u0026lt;PortNumber\u0026gt;: \u0026lt;Namespace\u0026gt;/\u0026lt;Service\u0026gt; tcp: # Map 1389 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAP Port 1389: oudns/oud-ds-rs-lbr-ldap:ldap # Map 1636 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAPS Port 1636: oudns/oud-ds-rs-lbr-ldap:ldaps controller: admissionWebhooks: enabled: false extraArgs: # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server. # If this flag is not provided NGINX will use a self-signed certificate. # If the TLS Secret is in different namespace, name can be mentioned as \u0026lt;namespace\u0026gt;/\u0026lt;tlsSecretName\u0026gt; default-ssl-certificate: oudns/oud-ds-rs-tls-cert service: # controller service external IP addresses # externalIPs: # - \u0026lt; External IP Address \u0026gt; # To configure Ingress Controller Service as LoadBalancer type of Service # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service type: LoadBalancer # Configuration for NodePort to be used for Ports exposed through Ingress # If NodePorts are not defied/configured, Node Port would be assigend automatically by Kubernetes # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer. 
nodePorts: # For HTTP Interface exposed through LoadBalancer/Ingress http: 30080 # For HTTPS Interface exposed through LoadBalancer/Ingress https: 30443 tcp: # For LDAP Interface 1389: 31389 # For LDAPS Interface 1636: 31636 To install and configure NGINX Ingress issue the following command:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx Where:\n lbr-nginx is your deployment name stable/ingress-nginx is the chart reference For example:\n$ helm install --namespace mynginx \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx The output will look similar to the following:\nNAME: lbr-nginx LAST DEPLOYED: Wed Mar 16 16:49:35 2022 NAMESPACE: mynginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. It may take a few minutes for the LoadBalancer IP to be available. You can watch the status by running 'kubectl --namespace mynginx get services -o wide -w lbr-nginx-ingress-nginx-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1beta1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: rules: - host: www.example.com http: paths: - backend: serviceName: exampleService servicePort: 80 path: / # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Optional: Command helm upgrade to update nginx-ingress If required, an nginx-ingress deployment can be updated/upgraded with following command. 
In this example, nginx-ingress configuration is updated with an additional TCP port and Node Port for accessing the LDAP/LDAPS port of a specific POD:\n Create a nginx-ingress-values-override.yaml that contains the following:\n# Configuration for additional TCP ports to be exposed through Ingress # Format for each port would be like: # \u0026lt;PortNumber\u0026gt;: \u0026lt;Namespace\u0026gt;/\u0026lt;Service\u0026gt; tcp: # Map 1389 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAP Port 1389: oudns/oud-ds-rs-lbr-ldap:ldap # Map 1636 TCP port to LBR LDAP service to get requests handled through any available POD/Endpoint serving LDAPS Port 1636: oudns/oud-ds-rs-lbr-ldap:ldaps # Map specific ports for LDAP and LDAPS communication from individual Services/Pods # To redirect requests on 3890 port to oudns/oud-ds-rs-ldap-0:ldap 3890: oudns/oud-ds-rs-ldap-0:ldap # To redirect requests on 6360 port to oudns/oud-ds-rs-ldaps-0:ldap 6360: oudns/oud-ds-rs-ldap-0:ldaps # To redirect requests on 3891 port to oudns/oud-ds-rs-ldap-1:ldap 3891: oudns/oud-ds-rs-ldap-1:ldap # To redirect requests on 6361 port to oudns/oud-ds-rs-ldaps-1:ldap 6361: oudns/oud-ds-rs-ldap-1:ldaps # To redirect requests on 3892 port to oudns/oud-ds-rs-ldap-2:ldap 3892: oudns/oud-ds-rs-ldap-2:ldap # To redirect requests on 6362 port to oudns/oud-ds-rs-ldaps-2:ldap 6362: oudns/oud-ds-rs-ldap-2:ldaps # Map 1444 TCP port to LBR Admin service to get requests handled through any available POD/Endpoint serving Admin LDAPS Port 1444: oudns/oud-ds-rs-lbr-admin:adminldaps # To redirect requests on 4440 port to oudns/oud-ds-rs-0:adminldaps 4440: oudns/oud-ds-rs-0:adminldaps # To redirect requests on 4441 port to oudns/oud-ds-rs-1:adminldaps 4441: oudns/oud-ds-rs-1:adminldaps # To redirect requests on 4442 port to oudns/oud-ds-rs-2:adminldaps 4442: oudns/oud-ds-rs-2:adminldaps controller: admissionWebhooks: enabled: false extraArgs: # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server. # If this flag is not provided NGINX will use a self-signed certificate. # If the TLS Secret is in different namespace, name can be mentioned as \u0026lt;namespace\u0026gt;/\u0026lt;tlsSecretName\u0026gt; default-ssl-certificate: oudns/oud-ds-rs-tls-cert service: # controller service external IP addresses # externalIPs: # - \u0026lt; External IP Address \u0026gt; # To configure Ingress Controller Service as LoadBalancer type of Service # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service type: LoadBalancer # Configuration for NodePort to be used for Ports exposed through Ingress # If NodePorts are not defied/configured, Node Port would be assigend automatically by Kubernetes # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer. 
nodePorts: # For HTTP Interface exposed through LoadBalancer/Ingress http: 30080 # For HTTPS Interface exposed through LoadBalancer/Ingress https: 30443 tcp: # For LDAP Interface referring to LBR LDAP services serving LDAP port 1389: 31389 # For LDAPS Interface referring to LBR LDAP services serving LDAPS port 1636: 31636 # For LDAP Interface from specific service oud-ds-rs-ldap-0 3890: 30890 # For LDAPS Interface from specific service oud-ds-rs-ldap-0 6360: 30360 # For LDAP Interface from specific service oud-ds-rs-ldap-1 3891: 30891 # For LDAPS Interface from specific service oud-ds-rs-ldap-1 6361: 30361 # For LDAP Interface from specific service oud-ds-rs-ldap-2 3892: 30892 # For LDAPS Interface from specific service oud-ds-rs-ldap-2 6362: 30362 # For LDAPS Interface referring to LBR Admin services serving adminldaps port 1444: 31444 # For Admin LDAPS Interface from specific service oud-ds-rs-0 4440: 30440 # For Admin LDAPS Interface from specific service oud-ds-rs-1 4441: 30441 # For Admin LDAPS Interface from specific service oud-ds-rs-2 4442: 30442 Run the following command to upgrade the ingress:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx Where:\n lbr-nginx is your deployment name stable/ingress-nginx is the chart reference For example:\n$ helm upgrade --namespace mynginx \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx Access to interfaces through ingress Using the Helm chart, ingress objects are created according to configuration. The following table details the rules configured in ingress object(s) for access to Oracle Unified Directory Interfaces through ingress.\n Port NodePort Host Example Hostname Path Backend Service:Port Example Service Name:Port http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-admin-0 oud-ds-rs-admin-0 * \u0026lt;deployment/release name\u0026gt;-0:adminhttps oud-ds-rs-0:adminhttps http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-admin-N oud-ds-rs-admin-N * \u0026lt;deployment/release name\u0026gt;-N:adminhttps oud-ds-rs-1:adminhttps http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-admin oud-ds-rs-admin * \u0026lt;deployment/release name\u0026gt;-lbr-admin:adminhttps oud-ds-rs-lbr-admin:adminhttps http/https 30080/30443 * * /rest/v1/admin \u0026lt;deployment/release name\u0026gt;-lbr-admin:adminhttps oud-ds-rs-lbr-admin:adminhttps http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-http-0 oud-ds-rs-http-0 * \u0026lt;deployment/release name\u0026gt;-http-0:http oud-ds-rs-http-0:http http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-http-N oud-ds-rs-http-N * \u0026lt;deployment/release name\u0026gt;-http-N:http oud-ds-rs-http-N:http http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-http oud-ds-rs-http * \u0026lt;deployment/release name\u0026gt;-lbr-http:http oud-ds-rs-lbr-http:http http/https 30080/30443 * * /rest/v1/directory \u0026lt;deployment/release name\u0026gt;-lbr-http:http oud-ds-rs-lbr-http:http http/https 30080/30443 * * /iam/directory \u0026lt;deployment/release name\u0026gt;-lbr-http:http oud-ds-rs-lbr-http:http In the table above, example values are based on the value \u0026lsquo;oud-ds-rs\u0026rsquo; as the deployment/release name for Helm chart installation.The NodePorts mentioned in the table are according to ingress configuration described in previous section.When External LoadBalancer is not available/configured, interfaces can 
be accessed through NodePort on a Kubernetes node.\n For LDAP/LDAPS access (based on the updated/upgraded configuration mentioned in previous section)\n Port NodePort Backend Service:Port Example Service Name:Port 1389 31389 \u0026lt;deployment/release name\u0026gt;-lbr-ldap:ldap oud-ds-rs-lbr-ldap:ldap 1636 31636 \u0026lt;deployment/release name\u0026gt;-lbr-ldap:ldap oud-ds-rs-lbr-ldap:ldaps 1444 31444 \u0026lt;deployment/release name\u0026gt;-lbr-admin:adminldaps oud-ds-rs-lbr-admin:adminldaps 3890 30890 \u0026lt;deployment/release name\u0026gt;-ldap-0:ldap oud-ds-rs-ldap-0:ldap 6360 30360 \u0026lt;deployment/release name\u0026gt;-ldap-0:ldaps oud-ds-rs-ldap-0:ldaps 3891 30891 \u0026lt;deployment/release name\u0026gt;-ldap-1:ldap oud-ds-rs-ldap-1:ldap 6361 30361 \u0026lt;deployment/release name\u0026gt;-ldap-1:ldaps oud-ds-rs-ldap-1:ldaps 3892 30892 \u0026lt;deployment/release name\u0026gt;-ldap-2:ldap oud-ds-rs-ldap-2:ldap 6362 30362 \u0026lt;deployment/release name\u0026gt;-ldap-2:ldaps oud-ds-rs-ldap-2:ldaps 4440 30440 \u0026lt;deployment/release name\u0026gt;-0:adminldaps oud-ds-rs-ldap-0:adminldaps 4441 30441 \u0026lt;deployment/release name\u0026gt;-1:adminldaps oud-ds-rs-ldap-1:adminldaps 4442 30442 \u0026lt;deployment/release name\u0026gt;-2:adminldaps oud-ds-rs-ldap-2:adminldaps In the table above, example values are based on value \u0026lsquo;oud-ds-rs\u0026rsquo; as the deployment/release name for helm chart installation. The NodePorts mentioned in the table are according to Ingress configuration described in previous section. When external LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on a Kubernetes Node. Changes in /etc/hosts to validate hostname based ingress rules If it is not possible to have a LoadBalancer configuration updated to have host names added for Oracle Unified Directory Interfaces then the following entries can be added in /etc/hosts files on the host from where Oracle Unified Directory interfaces will be accessed.\n\u0026lt;IP Address of External LBR or Kubernetes Node\u0026gt;\toud-ds-rs-http oud-ds-rs-http-0 oud-ds-rs-http-1 oud-ds-rs-http-2 oud-ds-rs-http-N \u0026lt;IP Address of External LBR or Kubernetes Node\u0026gt;\toud-ds-rs-admin oud-ds-rs-admin-0 oud-ds-rs-admin-1 oud-ds-rs-admin-2 oud-ds-rs-admin-N In the table above, host names are based on the value \u0026lsquo;oud-ds-rs\u0026rsquo; as the deployment/release name for Helm chart installation. When External LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on Kubernetes Node. Using LDAP utilities To use Oracle LDAP utilities such as ldapbind, ldapsearch, ldapmodify etc. you can either:\n Run the LDAP commands from an OUD installation outside the Kubernetes cluster. 
This requires access to an On-Premises OUD installation outside the Kubernetes cluster.\n Run the LDAP commands from inside the OUD Kubernetes pod.\n$ kubectl exec -ti \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; -- bash For example:\n$ kubectl exec -ti oud-ds-rs-0 -n oudns -- bash This will take you into a bash session in the pod:\n[oracle@oud-ds-rs-0 oracle]$ Inside the container navigate to /u01/oracle/oud/bin to view the LDAP utilities:\n[oracle@oud-ds-rs-0 oracle]$ cd /u01/oracle/oud/bin [oracle@oud-ds-rs-0 bin]$ ls ldap* ldapcompare ldapdelete ldapmodify ldappasswordmodify ldapsearch Note: For commands that require an ldif file, copy the file into the \u0026lt;persistent_volume\u0026gt;/oud_user_projects directory:\n$ cp file.ldif \u0026lt;persistent_volume\u0026gt;/oud_user_projects For example:\n$ cp file.ldif /scratch/shared/oud_user_projects The file can then be viewed inside the pod:\n[oracle@oud-ds-rs-0 bin]$ cd /u01/oracle/oud_user_projects [oracle@oud-ds-rs-0 user_projects]$ ls *.ldif file.ldif Validate access using LDAP Note: The examples assume sample data was installed when creating the OUD instance.\nLDAP against External Load Balancer Note If your ingress is configured with type: LoadBalancer then you cannot connect to the external LoadBalancer hostname and ports from inside the pod and must connect from an OUD installation outside the cluster.\n Command to perform ldapsearch against External LBR and LDAP port\n$OUD_HOME/bin/ldapsearch --hostname \u0026lt;External LBR\u0026gt; --port 1389 \\ -D \u0026#34;\u0026lt;Root User DN\u0026gt;\u0026#34; -w \u0026lt;Password for Root User DN\u0026gt; \\ -b \u0026#34;\u0026#34; -s base \u0026#34;(objectClass=*)\u0026#34; \u0026#34;*\u0026#34; The output will look similar to the following:\ndn: objectClass: top objectClass: ds-root-dse lastChangeNumber: 0 firstChangeNumber: 0 changelog: cn=changelog entryDN: pwdPolicySubentry: cn=Default Password Policy,cn=Password Policies,cn=config subschemaSubentry: cn=schema supportedAuthPasswordSchemes: SHA256 supportedAuthPasswordSchemes: SHA1 supportedAuthPasswordSchemes: SHA384 supportedAuthPasswordSchemes: SHA512 supportedAuthPasswordSchemes: MD5 numSubordinates: 1 supportedFeatures: 1.3.6.1.1.14 supportedFeatures: 1.3.6.1.4.1.4203.1.5.1 supportedFeatures: 1.3.6.1.4.1.4203.1.5.2 supportedFeatures: 1.3.6.1.4.1.4203.1.5.3 lastExternalChangelogCookie: vendorName: Oracle Corporation vendorVersion: Oracle Unified Directory 12.2.1.4.0 componentVersion: 4 releaseVersion: 1 platformVersion: 0 supportedLDAPVersion: 2 supportedLDAPVersion: 3 supportedControl: 1.2.826.0.1.3344810.2.3 supportedControl: 1.2.840.113556.1.4.1413 supportedControl: 1.2.840.113556.1.4.319 supportedControl: 1.2.840.113556.1.4.473 supportedControl: 1.2.840.113556.1.4.805 supportedControl: 1.3.6.1.1.12 supportedControl: 1.3.6.1.1.13.1 supportedControl: 1.3.6.1.1.13.2 supportedControl: 1.3.6.1.4.1.26027.1.5.2 supportedControl: 1.3.6.1.4.1.26027.1.5.4 supportedControl: 1.3.6.1.4.1.26027.1.5.5 supportedControl: 1.3.6.1.4.1.26027.1.5.6 supportedControl: 1.3.6.1.4.1.26027.2.3.1 supportedControl: 1.3.6.1.4.1.26027.2.3.2 supportedControl: 1.3.6.1.4.1.26027.2.3.4 supportedControl: 1.3.6.1.4.1.42.2.27.8.5.1 supportedControl: 1.3.6.1.4.1.42.2.27.9.5.2 supportedControl: 1.3.6.1.4.1.42.2.27.9.5.8 supportedControl: 1.3.6.1.4.1.4203.1.10.1 supportedControl: 1.3.6.1.4.1.4203.1.10.2 supportedControl: 2.16.840.1.113730.3.4.12 supportedControl: 2.16.840.1.113730.3.4.16 supportedControl: 2.16.840.1.113730.3.4.17 supportedControl: 
2.16.840.1.113730.3.4.18 supportedControl: 2.16.840.1.113730.3.4.19 supportedControl: 2.16.840.1.113730.3.4.2 supportedControl: 2.16.840.1.113730.3.4.3 supportedControl: 2.16.840.1.113730.3.4.4 supportedControl: 2.16.840.1.113730.3.4.5 supportedControl: 2.16.840.1.113730.3.4.9 supportedControl: 2.16.840.1.113894.1.8.21 supportedControl: 2.16.840.1.113894.1.8.31 supportedControl: 2.16.840.1.113894.1.8.36 maintenanceVersion: 2 supportedSASLMechanisms: PLAIN supportedSASLMechanisms: EXTERNAL supportedSASLMechanisms: CRAM-MD5 supportedSASLMechanisms: DIGEST-MD5 majorVersion: 12 orclGUID: D41D8CD98F003204A9800998ECF8427E entryUUID: d41d8cd9-8f00-3204-a980-0998ecf8427e ds-private-naming-contexts: cn=schema hasSubordinates: true nsUniqueId: d41d8cd9-8f003204-a9800998-ecf8427e structuralObjectClass: ds-root-dse supportedExtension: 1.3.6.1.4.1.4203.1.11.1 supportedExtension: 1.3.6.1.4.1.4203.1.11.3 supportedExtension: 1.3.6.1.1.8 supportedExtension: 1.3.6.1.4.1.26027.1.6.3 supportedExtension: 1.3.6.1.4.1.26027.1.6.2 supportedExtension: 1.3.6.1.4.1.26027.1.6.1 supportedExtension: 1.3.6.1.4.1.1466.20037 namingContexts: cn=changelog namingContexts: dc=example,dc=com Command to perform ldapsearch against External LBR and LDAP port for specific Oracle Unified Directory Interface\n$OUD_HOME/bin/ldapsearch --hostname \u0026lt;External LBR\u0026gt; --port 3890 \\ -D \u0026#34;\u0026lt;Root User DN\u0026gt;\u0026#34; -w \u0026lt;Password for Root User DN\u0026gt; \\ -b \u0026#34;\u0026#34; -s base \u0026#34;(objectClass=*)\u0026#34; \u0026#34;*\u0026#34; LDAPS against Kubernetes NodePort for Ingress Controller Service In the example below LDAP utilities are executed from inside the oud-ds-rs-0 pod. If your ingress is configured with type: LoadBalancer you can connect to the Kubernetes hostname where the ingress is deployed using the NodePorts.\n Command to perform ldapsearch against Kubernetes NodePort and LDAP port\n[oracle@oud-ds-rs-0 bin]$ ldapsearch --hostname \u0026lt;Kubernetes Node\u0026gt; --port 31636 \\ --useSSL --trustAll \\ -D \u0026#34;\u0026lt;Root User DN\u0026gt;\u0026#34; -w \u0026lt;Password for Root User DN\u0026gt; \\ -b \u0026#34;\u0026#34; -s base \u0026#34;(objectClass=*)\u0026#34; \u0026#34;*\u0026#34; Validate access using HTTPS HTTPS/REST API against External LBR Host:Port Note: In all the examples below:\na) You need to have an external IP assigned at ingress level.\nb) | json_pp is used to format output in readable json format on the client side. 
It can be ignored if you do not have the json_pp library.\nc) Base64 of userDN:userPassword can be generated using echo -n \u0026quot;userDN:userPassword\u0026quot; | base64.\n Command to invoke Data REST API:\n$curl --noproxy \u0026#34;*\u0026#34; -k --location \\ --request GET \u0026#39;https://\u0026lt;External LBR Host\u0026gt;/rest/v1/directory/uid=user.1,ou=People,dc=example,dc=com?scope=sub\u0026amp;attributes=*\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp The output will look similar to the following:\n{ \u0026#34;msgType\u0026#34; : \u0026#34;urn:ietf:params:rest:schemas:oracle:oud:1.0:SearchResponse\u0026#34;, \u0026#34;totalResults\u0026#34; : 1, \u0026#34;searchResultEntries\u0026#34; : [ { \u0026#34;dn\u0026#34; : \u0026#34;uid=user.1,ou=People,dc=example,dc=com\u0026#34;, \u0026#34;attributes\u0026#34; : { \u0026#34;st\u0026#34; : \u0026#34;OH\u0026#34;, \u0026#34;employeeNumber\u0026#34; : \u0026#34;1\u0026#34;, \u0026#34;postalCode\u0026#34; : \u0026#34;93694\u0026#34;, \u0026#34;description\u0026#34; : \u0026#34;This is the description for Aaren Atp.\u0026#34;, \u0026#34;telephoneNumber\u0026#34; : \u0026#34;+1 390 103 6917\u0026#34;, \u0026#34;homePhone\u0026#34; : \u0026#34;+1 280 375 4325\u0026#34;, \u0026#34;initials\u0026#34; : \u0026#34;ALA\u0026#34;, \u0026#34;objectClass\u0026#34; : [ \u0026#34;top\u0026#34;, \u0026#34;inetorgperson\u0026#34;, \u0026#34;organizationalperson\u0026#34;, \u0026#34;person\u0026#34; ], \u0026#34;uid\u0026#34; : \u0026#34;user.1\u0026#34;, \u0026#34;sn\u0026#34; : \u0026#34;Atp\u0026#34;, \u0026#34;street\u0026#34; : \u0026#34;70110 Fourth Street\u0026#34;, \u0026#34;mobile\u0026#34; : \u0026#34;+1 680 734 6300\u0026#34;, \u0026#34;givenName\u0026#34; : \u0026#34;Aaren\u0026#34;, \u0026#34;mail\u0026#34; : \u0026#34;user.1@maildomain.net\u0026#34;, \u0026#34;l\u0026#34; : \u0026#34;New Haven\u0026#34;, \u0026#34;postalAddress\u0026#34; : \u0026#34;Aaren Atp$70110 Fourth Street$New Haven, OH 93694\u0026#34;, \u0026#34;pager\u0026#34; : \u0026#34;+1 850 883 8888\u0026#34;, \u0026#34;cn\u0026#34; : \u0026#34;Aaren Atp\u0026#34; } } ] } Command to invoke Data REST API against specific Oracle Unified Directory Interface:\n$ curl --noproxy \u0026#34;*\u0026#34; -k --location \\ --request GET \u0026#39;https://oud-ds-rs-http-0/rest/v1/directory/uid=user.1,ou=People,dc=example,dc=com?scope=sub\u0026amp;attributes=*\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp For this example, it is assumed that the value \u0026lsquo;oud-ds-rs\u0026rsquo; is used as the deployment/release name for helm chart installation. It is assumed that \u0026lsquo;oud-ds-rs-http-0\u0026rsquo; points to an External LoadBalancer HTTPS/REST API against Kubernetes NodePort for Ingress Controller Service Note: In all the examples below:\na) | json_pp is used to format output in readable json format on the client side. 
It can be ignored if you do not have the json_pp library.\nb) Base64 of userDN:userPassword can be generated using echo -n \u0026quot;userDN:userPassword\u0026quot; | base64.\nc) It is assumed that the value \u0026lsquo;oud-ds-rs\u0026rsquo; is used as the deployment/release name for helm chart installation.\n Command to invoke Data SCIM API:\n$ curl --noproxy \u0026#34;*\u0026#34; -k --location \\ --request GET \u0026#39;https://\u0026lt;Kubernetes Node\u0026gt;:30443/iam/directory/oud/scim/v1/Users\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp The output will look similar to the following:\n{ \u0026#34;Resources\u0026#34; : [ { \u0026#34;id\u0026#34; : \u0026#34;ad55a34a-763f-358f-93f9-da86f9ecd9e4\u0026#34;, \u0026#34;userName\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;user.0\u0026#34; } ], \u0026#34;schemas\u0026#34; : [ \u0026#34;urn:ietf:params:scim:schemas:core:2.0:User\u0026#34;, \u0026#34;urn:ietf:params:scim:schemas:extension:oracle:2.0:OUD:User\u0026#34;, \u0026#34;urn:ietf:params:scim:schemas:extension:enterprise:2.0:User\u0026#34; ], \u0026#34;meta\u0026#34; : { \u0026#34;location\u0026#34; : \u0026#34;http://\u0026lt;Kubernetes Node\u0026gt;:30443/iam/directory/oud/scim/v1/Users/ad55a34a-763f-358f-93f9-da86f9ecd9e4\u0026#34;, \u0026#34;resourceType\u0026#34; : \u0026#34;User\u0026#34; }, \u0026#34;addresses\u0026#34; : [ { \u0026#34;postalCode\u0026#34; : \u0026#34;50369\u0026#34;, \u0026#34;formatted\u0026#34; : \u0026#34;Aaccf Amar$01251 Chestnut Street$Panama City, DE 50369\u0026#34;, \u0026#34;streetAddress\u0026#34; : \u0026#34;01251 Chestnut Street\u0026#34;, \u0026#34;locality\u0026#34; : \u0026#34;Panama City\u0026#34;, \u0026#34;region\u0026#34; : \u0026#34;DE\u0026#34; } ], \u0026#34;urn:ietf:params:scim:schemas:extension:oracle:2.0:OUD:User\u0026#34; : { \u0026#34;description\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;This is the description for Aaccf Amar.\u0026#34; } ], \u0026#34;mobile\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;+1 010 154 3228\u0026#34; } ], \u0026#34;pager\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;+1 779 041 6341\u0026#34; } ], \u0026#34;objectClass\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;top\u0026#34; }, { \u0026#34;value\u0026#34; : \u0026#34;organizationalperson\u0026#34; }, { \u0026#34;value\u0026#34; : \u0026#34;person\u0026#34; }, { \u0026#34;value\u0026#34; : \u0026#34;inetorgperson\u0026#34; } ], \u0026#34;initials\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;ASA\u0026#34; } ], \u0026#34;homePhone\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;+1 225 216 5900\u0026#34; } ] }, \u0026#34;name\u0026#34; : [ { \u0026#34;givenName\u0026#34; : \u0026#34;Aaccf\u0026#34;, \u0026#34;familyName\u0026#34; : \u0026#34;Amar\u0026#34;, \u0026#34;formatted\u0026#34; : \u0026#34;Aaccf Amar\u0026#34; } ], \u0026#34;emails\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;user.0@maildomain.net\u0026#34; } ], \u0026#34;phoneNumbers\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;+1 685 622 6202\u0026#34; } ], \u0026#34;urn:ietf:params:scim:schemas:extension:enterprise:2.0:User\u0026#34; : { \u0026#34;employeeNumber\u0026#34; : [ { \u0026#34;value\u0026#34; : \u0026#34;0\u0026#34; } ] } } , . . . 
} Command to invoke Data SCIM API against specific Oracle Unified Directory Interface:\n$ curl --noproxy \u0026#34;*\u0026#34; -k --location \\ --request GET \u0026#39;https://oud-ds-rs-http-0:30443/iam/directory/oud/scim/v1/Users\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp HTTPS/REST Admin API Note: In all the examples below:\na) | json_pp is used to format output in readable json format on the client side. It can be ignored if you do not have the json_pp library.\nb) Base64 of userDN:userPassword can be generated using echo -n \u0026quot;userDN:userPassword\u0026quot; | base64.\n Command to invoke Admin REST API against External LBR:\n$ curl --noproxy \u0026#34;*\u0026#34; -k --insecure --location \\ --request GET \u0026#39;https://\u0026lt;External LBR Host\u0026gt;/rest/v1/admin/?scope=base\u0026amp;attributes=vendorName\u0026amp;attributes=vendorVersion\u0026amp;attributes=ds-private-naming-contexts\u0026amp;attributes=subschemaSubentry\u0026#39; \\ --header \u0026#39;Content-Type: application/json\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp The output will look similar to the following:\n{ \u0026#34;totalResults\u0026#34; : 1, \u0026#34;searchResultEntries\u0026#34; : [ { \u0026#34;dn\u0026#34; : \u0026#34;\u0026#34;, \u0026#34;attributes\u0026#34; : { \u0026#34;vendorVersion\u0026#34; : \u0026#34;Oracle Unified Directory 12.2.1.4.0\u0026#34;, \u0026#34;ds-private-naming-contexts\u0026#34; : [ \u0026#34;cn=admin data\u0026#34;, \u0026#34;cn=ads-truststore\u0026#34;, \u0026#34;cn=backups\u0026#34;, \u0026#34;cn=config\u0026#34;, \u0026#34;cn=monitor\u0026#34;, \u0026#34;cn=schema\u0026#34;, \u0026#34;cn=tasks\u0026#34;, \u0026#34;cn=virtual acis\u0026#34;, \u0026#34;dc=replicationchanges\u0026#34; ], \u0026#34;subschemaSubentry\u0026#34; : \u0026#34;cn=schema\u0026#34;, \u0026#34;vendorName\u0026#34; : \u0026#34;Oracle Corporation\u0026#34; } } ], \u0026#34;msgType\u0026#34; : \u0026#34;urn:ietf:params:rest:schemas:oracle:oud:1.0:SearchResponse\u0026#34; } Command to invoke Admin REST API against specific Oracle Unified Directory Admin Interface:\n$ curl --noproxy \u0026#34;*\u0026#34; -k --insecure --location \\ --request GET \u0026#39;https://oud-ds-rs-admin-0/rest/v1/admin/?scope=base\u0026amp;attributes=vendorName\u0026amp;attributes=vendorVersion\u0026amp;attributes=ds-private-naming-contexts\u0026amp;attributes=subschemaSubentry\u0026#39; \\ --header \u0026#39;Content-Type: application/json\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp Command to invoke Admin REST API against Kubernetes NodePort for Ingress Controller Service\n$ curl --noproxy \u0026#34;*\u0026#34; -k --insecure --location \\ --request GET \u0026#39;https://oud-ds-rs-admin-0:30443/rest/v1/admin/?scope=base\u0026amp;attributes=vendorName\u0026amp;attributes=vendorVersion\u0026amp;attributes=ds-private-naming-contexts\u0026amp;attributes=subschemaSubentry\u0026#39; \\ --header \u0026#39;Content-Type: application/json\u0026#39; \\ --header \u0026#39;Authorization: Basic \u0026lt;Base64 of userDN:userPassword\u0026gt;\u0026#39; | json_pp " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oudsm/configure-ingress/", + "title": "Configure an Ingress for OUDSM", + "tags": [], + "description": "This document provides steps to configure an ingress controller to direct traffic to OUDSM.", + "content": " 
Introduction\n Install NGINX\na. Configure the repository\nb. Create a namespace\nc. Install NGINX using helm\n Access to interfaces through ingress\n Introduction The instructions below explain how to set up NGINX as an ingress for OUDSM.\nInstall NGINX Use Helm to install NGINX.\nConfigure the repository Add the Helm chart repository for installing NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. Happy Helming! Create a namespace Create a Kubernetes namespace for NGINX:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace mynginx The output will look similar to the following:\nnamespace/mynginx created Install NGINX using helm Create a $WORKDIR/kubernetes/helm/nginx-ingress-values-override.yaml that contains the following:\nNote: The configuration below deploys an ingress using LoadBalancer. If you prefer to use NodePort, change the configuration accordingly. For more details about NGINX configuration see: NGINX Ingress Controller.\ncontroller: admissionWebhooks: enabled: false extraArgs: # The secret referred to by this flag contains the default certificate to be used when accessing the catch-all server. # If this flag is not provided NGINX will use a self-signed certificate. # If the TLS Secret is in different namespace, name can be mentioned as \u0026lt;namespace\u0026gt;/\u0026lt;tlsSecretName\u0026gt; default-ssl-certificate: oudsmns/oudsm-tls-cert service: # controller service external IP addresses # externalIPs: # - \u0026lt; External IP Address \u0026gt; # To configure Ingress Controller Service as LoadBalancer type of Service # Based on the Kubernetes configuration, External LoadBalancer would be linked to the Ingress Controller Service type: LoadBalancer # Configuration for NodePort to be used for Ports exposed through Ingress # If NodePorts are not defined/configured, Node Port would be assigned automatically by Kubernetes # These NodePorts are helpful while accessing services directly through Ingress and without having External Load Balancer. nodePorts: # For HTTP Interface exposed through LoadBalancer/Ingress http: 30080 # For HTTPS Interface exposed through LoadBalancer/Ingress https: 30443 To install and configure NGINX ingress issue the following command:\n$ helm install --namespace \u0026lt;namespace\u0026gt; \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx Where:\n lbr-nginx is your deployment name stable/ingress-nginx is the chart reference For example:\n$ helm install --namespace mynginx \\ --values nginx-ingress-values-override.yaml \\ lbr-nginx stable/ingress-nginx The output will be similar to the following:\nNAME: lbr-nginx LAST DEPLOYED: Mon Mar 21 17:07:32 2022 NAMESPACE: mynginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. It may take a few minutes for the LoadBalancer IP to be available. 
You can watch the status by running 'kubectl --namespace mynginx get services -o wide -w lbr-nginx-ingress-nginx-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: name: example namespace: foo spec: ingressClassName: nginx rules: - host: www.example.com http: paths: - pathType: Prefix backend: service: name: exampleService port: number: 80 path: / # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Access to interfaces through ingress Using the Helm chart, ingress objects are created according to configuration. The following table details the rules configured in ingress object(s) for access to Oracle Unified Directory Services Manager Interfaces through ingress.\n Port NodePort Host Example Hostname Path Backend Service:Port Example Service Name:Port http/https 30080/30443 \u0026lt;deployment/release name\u0026gt;-N oudsm-N * \u0026lt;deployment/release name\u0026gt;-N:http oudsm-1:http http/https 30080/30443 * * /oudsm/console \u0026lt;deployment/release name\u0026gt;-lbr:http oudsm-lbr:http In the table above, the Example Name for each Object is based on the value \u0026lsquo;oudsm\u0026rsquo; as the deployment/release name for the Helm chart installation. The NodePorts mentioned in the table are according to ingress configuration described in previous section. When an External LoadBalancer is not available/configured, interfaces can be accessed through NodePort on the Kubernetes node. Changes in /etc/hosts to validate hostname based ingress rules If it is not possible to have LoadBalancer configuration updated to have host names added for Oracle Unified Directory Services Manager Interfaces, then the following entries can be added in /etc/hosts files on the host from where Oracle Unified Directory Services Manager interfaces would be accessed.\n\u0026lt;IP Address of External LBR or Kubernetes Node\u0026gt;\toudsm oudsm-1 oudsm-2 oudsm-N In the table above, host names are based on the value \u0026lsquo;oudsm\u0026rsquo; as the deployment/release name for the Helm chart installation. When an External LoadBalancer is not available/configured, Interfaces can be accessed through NodePort on the Kubernetes Node. Validate OUDSM URL\u0026rsquo;s Launch a browser and access the OUDSM console. If using an External LoadBalancer: https://\u0026lt;External LBR Host\u0026gt;/oudsm. If not using an External LoadBalancer use https://\u0026lt;Kubernetes Node\u0026gt;:30443/oudsm. Access the WebLogic Administration console by accessing the following URL and login with weblogic/\u0026lt;password\u0026gt; where weblogic/\u0026lt;password\u0026gt; is the adminUser and adminPass set when creating the OUDSM instance. If using an External LoadBalancer: https://\u0026lt;External LBR Host\u0026gt;/console. If not using an External LoadBalancer use https://\u0026lt;Kubernetes Node\u0026gt;:30443/console. 
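To quickly confirm the ingress routing before opening a browser, a curl request can be issued against the OUDSM console URL. This is a minimal sketch: the \u0026lt;Kubernetes Node\u0026gt; hostname, NodePort 30443 and the /oudsm path come from the examples above, while the -k flag (to skip certificate verification when the default or a self-signed certificate is used) and the expected response codes are assumptions.\n$ curl -k -s -o /dev/null -w \u0026#34;%{http_code}\u0026#34; https://\u0026lt;Kubernetes Node\u0026gt;:30443/oudsm A response code of 200 or 302 indicates the /oudsm ingress rule is routing to the OUDSM service; a 404 or 503 suggests the ingress rules or the backend service are not yet available.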
" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/manage-oig-domains/monitoring-oim-domains/", + "title": "Monitoring an OIG domain", + "tags": [], + "description": "Describes the steps for Monitoring the OIG domain and Publising the logs to Elasticsearch.", + "content": "After the OIG domain is set up you can monitor the OIG instance using Prometheus and Grafana. See Monitoring a domain.\nThe WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics.\nThere are two ways to setup monitoring and you should choose one method or the other:\n Setup automatically using setup-monitoring.sh Setup using manual configuration Setup automatically using setup-monitoring.sh The $WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh sets up the monitoring for the OIG domain. It installs Prometheus, Grafana, WebLogic Monitoring Exporter and deploys the web applications to the OIG domain. It also deploys the WebLogic Server Grafana dashboard.\nFor usage details execute ./setup-monitoring.sh -h.\n Edit the $WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml and change the domainUID, domainNamespace, and weblogicCredentialsSecretName to correspond to your deployment. For example:\nversion: create-oimcluster-monitoring-inputs-v1 # Unique ID identifying your domain. # This ID must not contain an underscope (\u0026quot;_\u0026quot;), and must be lowercase and unique across all domains in a Kubernetes cluster. domainUID: governancedomain # Name of the domain namespace domainNamespace: oigns # Boolean value indicating whether to install kube-prometheus-stack setupKubePrometheusStack: true # Additional parameters for helm install kube-prometheus-stack # Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters # Sample : # additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false additionalParamForKubePrometheusStack: # Name of the monitoring namespace monitoringNamespace: monitoring # Name of the Admin Server adminServerName: AdminServer # # Port number for admin server adminServerPort: 7001 # Cluster name soaClusterName: soa_cluster # Port number for managed server soaManagedServerPort: 8001 # WebLogic Monitoring Exporter to Cluster wlsMonitoringExporterTosoaCluster: true # Cluster name oimClusterName: oim_cluster # Port number for managed server oimManagedServerPort: 14000 # WebLogic Monitoring Exporter to Cluster wlsMonitoringExporterTooimCluster: true # Boolean to indicate if the adminNodePort will be exposed exposeMonitoringNodePort: true # NodePort to expose Prometheus prometheusNodePort: 32101 # NodePort to expose Grafana grafanaNodePort: 32100 # NodePort to expose Alertmanager alertmanagerNodePort: 32102 # Name of the Kubernetes secret for the Admin Server's username and password weblogicCredentialsSecretName: oig-domain-credentials Run the following command to setup monitoring:\n$ cd $WORKDIR/kubernetes/monitoring-service $ ./setup-monitoring.sh -i monitoring-inputs.yaml The output should be similar to the following:\nMonitoring setup in monitoring in progress node/worker-node1 not labeled node/worker-node2 not labeled node/master-node not labeled Setup 
prometheus-community/kube-prometheus-stack started \u0026quot;prometheus-community\u0026quot; already exists with the same configuration, skipping Hang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository ...Successfully got an update from the \u0026quot;prometheus\u0026quot; chart repository ...Successfully got an update from the \u0026quot;prometheus-community\u0026quot; chart repository ...Successfully got an update from the \u0026quot;appscode\u0026quot; chart repository Update Complete. ⎈Happy Helming!⎈ Setup prometheus-community/kube-prometheus-stack in progress NAME: monitoring LAST DEPLOYED: Thu Mar 10 14:58:56 2022 NAMESPACE: monitoring STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace monitoring get pods -l \u0026quot;release=monitoring\u0026quot; Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create \u0026amp; configure Alertmanager and Prometheus instances using the Operator. Setup prometheus-community/kube-prometheus-stack completed Deploy WebLogic Monitoring Exporter started Deploying WebLogic Monitoring Exporter with domainNamespace[oigns], domainUID[governancedomain], adminServerPodName[governancedomain-adminserver] % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 655 100 655 0 0 1159 0 --:--:-- --:--:-- --:--:-- 1159 100 2196k 100 2196k 0 0 1763k 0 0:00:01 0:00:01 --:--:-- 20.7M created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir created /tmp/ci-GJSQsiXrFE /tmp/ci-GJSQsiXrFE $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service created /tmp/ci-KeyZrdouMD /tmp/ci-KeyZrdouMD $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service created /tmp/ci-QE9HawIIgT /tmp/ci-QE9HawIIgT $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://governancedomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;governancedomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... \u0026lt;Mar 10, 2022 3:00:08 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. 
\u0026lt;Mar 10, 2022 3:00:20 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ... \u0026lt;Mar 10, 2022 3:00:21 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-soa [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war], to soa_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-soa. \u0026lt;Mar 10, 2022 3:00:28 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-soa [archive: null], to soa_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ... \u0026lt;Mar 10, 2022 3:00:31 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-oim [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war], to oim_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oim. \u0026lt;Mar 10, 2022 3:00:38 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-oim [archive: null], to oim_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Disconnected from weblogic server: AdminServer Exiting WebLogic Scripting Tool. \u0026lt;Mar 10, 2022 3:00:41 PM GMT\u0026gt; \u0026lt;Warning\u0026gt; \u0026lt;JNDI\u0026gt; \u0026lt;BEA-050001\u0026gt; \u0026lt;WLContext.close() was called in a different thread than the one in which it was created.\u0026gt; Deploy WebLogic Monitoring Exporter completed secret/basic-auth created servicemonitor.monitoring.coreos.com/wls-exporter created Deploying WebLogic Server Grafana Dashboard.... 
{\u0026quot;id\u0026quot;:25,\u0026quot;slug\u0026quot;:\u0026quot;weblogic-server-dashboard\u0026quot;,\u0026quot;status\u0026quot;:\u0026quot;success\u0026quot;,\u0026quot;uid\u0026quot;:\u0026quot;5yUwzbZWz\u0026quot;,\u0026quot;url\u0026quot;:\u0026quot;/d/5yUwzbZWz/weblogic-server-dashboard\u0026quot;,\u0026quot;version\u0026quot;:1} Deployed WebLogic Server Grafana Dashboard successfully Grafana is available at NodePort: 32100 Prometheus is available at NodePort: 32101 Altermanager is available at NodePort: 32102 ============================================================== Prometheus service discovery After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.\n Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery\n Click on serviceMonitor/oigns/wls-exporter/0 and then show more. Verify all the targets are mentioned.\n Note : It may take several minutes for serviceMonitor/oigns/wls-exporter/0 to appear, so refresh the page until it does.\nGrafana dashboard Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.\n In the Dashboards panel, click on WebLogic Server Dashboard. The dashboard for your OIG domain should be displayed. If it is not displayed, click the Search icon in the left hand menu and search for WebLogic Server Dashboard.\n Cleanup To uninstall the Prometheus, Grafana, WebLogic Monitoring Exporter and the deployments, you can run the $WORKDIR/monitoring-service/kubernetes/delete-monitoring.sh script. For usage details execute ./delete-monitoring.sh -h\n To uninstall run the following command:\n$ cd $WORKDIR/kubernetes/monitoring-service $ ./delete-monitoring.sh -i monitoring-inputs.yaml Setup using manual configuration Install Prometheus, Grafana and WebLogic Monitoring Exporter manually. Create the web applications and deploy to the OIG domain.\nDeploy the Prometheus operator Kube-Prometheus requires all nodes to be labelled with kubernetes.io/os=linux. To check if your nodes are labelled, run the following:\n$ kubectl get nodes --show-labels If the nodes are labelled the output will look similar to the following:\nNAME STATUS ROLES AGE VERSION LABELS worker-node1 Ready \u0026lt;none\u0026gt; 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux worker-node2 Ready \u0026lt;none\u0026gt; 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux master-node Ready master 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master-node,kubernetes.io/os=linux,node-role.kubernetes.io/master= If the nodes are not labelled, run the following command:\n$ kubectl label nodes --all kubernetes.io/os=linux Clone Prometheus by running the following commands:\n$ cd $WORKDIR/kubernetes/monitoring-service $ git clone https://github.com/coreos/kube-prometheus.git -b v0.7.0 Note: Please refer the compatibility matrix of Kube Prometheus. 
Please download the release of the repository according to the Kubernetes version of your cluster.\n Run the following command to create the namespace and custom resource definitions:\n$ cd kube-prometheus $ kubectl create -f manifests/setup The output will look similar to the following:\nnamespace/monitoring created customresourcedefinition.apiextensions.k8s.io/alertmanagerconfigs.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/probes.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/prometheuses.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/thanosrulers.monitoring.coreos.com created clusterrole.rbac.authorization.k8s.io/prometheus-operator created clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created deployment.apps/prometheus-operator created service/prometheus-operator created serviceaccount/prometheus-operator created Run the following command to created the rest of the resources:\n$ kubectl create -f manifests/ The output will look similar to the following:\nalertmanager.monitoring.coreos.com/main created prometheusrule.monitoring.coreos.com/alertmanager-main-rules created secret/alertmanager-main created service/alertmanager-main created serviceaccount/alertmanager-main created servicemonitor.monitoring.coreos.com/alertmanager-main created clusterrole.rbac.authorization.k8s.io/blackbox-exporter created clusterrolebinding.rbac.authorization.k8s.io/blackbox-exporter created configmap/blackbox-exporter-configuration created deployment.apps/blackbox-exporter created service/blackbox-exporter created serviceaccount/blackbox-exporter created servicemonitor.monitoring.coreos.com/blackbox-exporter created secret/grafana-config created secret/grafana-datasources created configmap/grafana-dashboard-alertmanager-overview created configmap/grafana-dashboard-apiserver created configmap/grafana-dashboard-cluster-total created configmap/grafana-dashboard-controller-manager created configmap/grafana-dashboard-k8s-resources-cluster created configmap/grafana-dashboard-k8s-resources-namespace created configmap/grafana-dashboard-k8s-resources-node created configmap/grafana-dashboard-k8s-resources-pod created configmap/grafana-dashboard-k8s-resources-workload created configmap/grafana-dashboard-k8s-resources-workloads-namespace created configmap/grafana-dashboard-kubelet created configmap/grafana-dashboard-namespace-by-pod created configmap/grafana-dashboard-namespace-by-workload created configmap/grafana-dashboard-node-cluster-rsrc-use created configmap/grafana-dashboard-node-rsrc-use created configmap/grafana-dashboard-nodes created configmap/grafana-dashboard-persistentvolumesusage created configmap/grafana-dashboard-pod-total created configmap/grafana-dashboard-prometheus-remote-write created configmap/grafana-dashboard-prometheus created configmap/grafana-dashboard-proxy created configmap/grafana-dashboard-scheduler created configmap/grafana-dashboard-workload-total created configmap/grafana-dashboards created deployment.apps/grafana created service/grafana created serviceaccount/grafana created servicemonitor.monitoring.coreos.com/grafana 
created prometheusrule.monitoring.coreos.com/kube-prometheus-rules created clusterrole.rbac.authorization.k8s.io/kube-state-metrics created clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created deployment.apps/kube-state-metrics created prometheusrule.monitoring.coreos.com/kube-state-metrics-rules created service/kube-state-metrics created serviceaccount/kube-state-metrics created servicemonitor.monitoring.coreos.com/kube-state-metrics created prometheusrule.monitoring.coreos.com/kubernetes-monitoring-rules created servicemonitor.monitoring.coreos.com/kube-apiserver created servicemonitor.monitoring.coreos.com/coredns created servicemonitor.monitoring.coreos.com/kube-controller-manager created servicemonitor.monitoring.coreos.com/kube-scheduler created servicemonitor.monitoring.coreos.com/kubelet created clusterrole.rbac.authorization.k8s.io/node-exporter created clusterrolebinding.rbac.authorization.k8s.io/node-exporter created daemonset.apps/node-exporter created prometheusrule.monitoring.coreos.com/node-exporter-rules created service/node-exporter created serviceaccount/node-exporter created servicemonitor.monitoring.coreos.com/node-exporter created apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created clusterrole.rbac.authorization.k8s.io/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created clusterrolebinding.rbac.authorization.k8s.io/prometheus-adapter created clusterrolebinding.rbac.authorization.k8s.io/resource-metrics:system:auth-delegator created clusterrole.rbac.authorization.k8s.io/resource-metrics-server-resources created configmap/adapter-config created deployment.apps/prometheus-adapter created rolebinding.rbac.authorization.k8s.io/resource-metrics-auth-reader created service/prometheus-adapter created serviceaccount/prometheus-adapter created servicemonitor.monitoring.coreos.com/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/prometheus-k8s created clusterrolebinding.rbac.authorization.k8s.io/prometheus-k8s created prometheusrule.monitoring.coreos.com/prometheus-operator-rules created servicemonitor.monitoring.coreos.com/prometheus-operator created prometheus.monitoring.coreos.com/k8s created prometheusrule.monitoring.coreos.com/prometheus-k8s-prometheus-rules created rolebinding.rbac.authorization.k8s.io/prometheus-k8s-config created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s-config created role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created service/prometheus-k8s created serviceaccount/prometheus-k8s created servicemonitor.monitoring.coreos.com/prometheus-k8s created unable to recognize \u0026quot;manifests/alertmanager-podDisruptionBudget.yaml\u0026quot;: no matches for kind \u0026quot;PodDisruptionBudget\u0026quot; in version \u0026quot;policy/v1\u0026quot; unable to recognize \u0026quot;manifests/prometheus-adapter-podDisruptionBudget.yaml\u0026quot;: no matches for kind \u0026quot;PodDisruptionBudget\u0026quot; in version \u0026quot;policy/v1\u0026quot; unable to recognize \u0026quot;manifests/prometheus-podDisruptionBudget.yaml\u0026quot;: no matches for kind \u0026quot;PodDisruptionBudget\u0026quot; in version \u0026quot;policy/v1\u0026quot; Provide external access for Grafana, 
Prometheus, and Alertmanager, by running the following commands:\n$ kubectl patch svc grafana -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32100 }]\u0026#39; $ kubectl patch svc prometheus-k8s -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32101 }]\u0026#39; $ kubectl patch svc alertmanager-main -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32102 }]\u0026#39; Note: This assigns port 32100 to Grafana, 32101 to Prometheus, and 32102 to Alertmanager.\nThe output will look similar to the following:\nservice/grafana patched service/prometheus-k8s patched service/alertmanager-main patched Verify that the Prometheus, Grafana, and Alertmanager pods are running in the monitoring namespace and the respective services have the exports configured correctly:\n$ kubectl get pods,services -o wide -n monitoring The output should look similar to the following:\npod/alertmanager-main-0 2/2 Running 0 40s 10.244.1.29 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/alertmanager-main-1 2/2 Running 0 40s 10.244.2.68 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/alertmanager-main-2 2/2 Running 0 40s 10.244.1.28 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/grafana-f8cd57fcf-zpjh2 1/1 Running 0 40s 10.244.2.69 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/kube-state-metrics-587bfd4f97-zw9zj 3/3 Running 0 38s 10.244.1.30 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-2cgrm 2/2 Running 0 38s 10.196.54.36 master-node \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-fpl7f 2/2 Running 0 38s 10.247.95.26 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-kvvnr 2/2 Running 0 38s 10.250.40.59 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-adapter-69b8496df6-9vfdp 1/1 Running 0 38s 10.244.2.70 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-k8s-0 2/2 Running 0 37s 10.244.2.71 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-k8s-1 2/2 Running 0 37s 10.244.1.31 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-operator-7649c7454f-g5b4l 2/2 Running 0 47s 10.244.2.67 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/alertmanager-main NodePort 10.105.76.223 \u0026lt;none\u0026gt; 9093:32102/TCP 41s alertmanager=main,app=alertmanager service/alertmanager-operated ClusterIP None \u0026lt;none\u0026gt; 9093/TCP,9094/TCP,9094/UDP 40s app=alertmanager service/grafana NodePort 10.107.86.157 
\u0026lt;none\u0026gt; 3000:32100/TCP 40s app=grafana service/kube-state-metrics ClusterIP None \u0026lt;none\u0026gt; 8443/TCP,9443/TCP 40s app.kubernetes.io/name=kube-state-metrics service/node-exporter ClusterIP None \u0026lt;none\u0026gt; 9100/TCP 39s app.kubernetes.io/name=node-exporter service/prometheus-adapter ClusterIP 10.102.244.224 \u0026lt;none\u0026gt; 443/TCP 39s name=prometheus-adapter service/prometheus-k8s NodePort 10.100.241.34 \u0026lt;none\u0026gt; 9090:32101/TCP 39s app=prometheus,prometheus=k8s service/prometheus-operated ClusterIP None \u0026lt;none\u0026gt; 9090/TCP 39s app=prometheus service/prometheus-operator ClusterIP None \u0026lt;none\u0026gt; 8443/TCP 47s app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator Deploy WebLogic Monitoring Exporter Generate the WebLogic Monitoring Exporter deployment package. The wls-exporter.war package needs to be updated and created for each listening port (Administration Server and Managed Servers) in the domain.\n Set the environment values below and run the script get-wls-exporter.sh to generate the required WAR files at ${WORKDIR}/kubernetes/monitoring-service/scripts/wls-exporter-deploy:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ export adminServerPort=7001 $ export wlsMonitoringExporterTosoaCluster=true $ export soaManagedServerPort=8001 $ export wlsMonitoringExporterTooimCluster=true $ export oimManagedServerPort=14000 $ sh get-wls-exporter.sh The output will look similar to the following:\n % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 655 100 655 0 0 1159 0 --:--:-- --:--:-- --:--:-- 1159 100 2196k 100 2196k 0 0 1430k 0 0:00:01 0:00:01 --:--:-- 8479k created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir domainNamespace is empty, setting to default oimcluster domainUID is empty, setting to default oimcluster weblogicCredentialsSecretName is empty, setting to default \u0026quot;oimcluster-domain-credentials\u0026quot; adminServerPort is empty, setting to default \u0026quot;7001\u0026quot; soaClusterName is empty, setting to default \u0026quot;soa_cluster\u0026quot; oimClusterName is empty, setting to default \u0026quot;oim_cluster\u0026quot; created /tmp/ci-NEZy7NOfoz /tmp/ci-NEZy7NOfoz $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts created /tmp/ci-J7QJ4Nc1lo /tmp/ci-J7QJ4Nc1lo $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts created /tmp/ci-f4GbaxM2aJ /tmp/ci-f4GbaxM2aJ $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts Deploy the WebLogic Monitoring Exporter WAR files into the Oracle Identity Governance domain:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ kubectl cp wls-exporter-deploy \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle $ kubectl cp deploy-weblogic-monitoring-exporter.py \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n \u0026lt;domain_namespace\u0026gt; \u0026lt;domain_uid\u0026gt;-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh 
/u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName \u0026lt;domain_uid\u0026gt; -adminServerName AdminServer -adminURL \u0026lt;domain_uid\u0026gt;-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true For example:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ kubectl cp wls-exporter-deploy oigns/governancedomain-adminserver:/u01/oracle $ kubectl cp deploy-weblogic-monitoring-exporter.py oigns/governancedomain-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n oigns governancedomain-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName governancedomain -adminServerName AdminServer -adminURL governancedomain-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true The output will look similar to the following:\nInitializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... \u0026lt;Nov 18, 2021 10:35:44 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. \u0026lt;Nov 18, 2021 10:35:56 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war to targets soa_cluster (upload=true) ... \u0026lt;Nov 18, 2021 10:35:59 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-soa [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-soa.war], to soa_cluster .\u0026gt; ..Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-soa. 
\u0026lt;Nov 18, 2021 10:36:12 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-soa [archive: null], to soa_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war to targets oim_cluster (upload=true) ... \u0026lt;Nov 18, 2021 10:36:15 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-oim [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oim.war], to oim_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oim. \u0026lt;Nov 18, 2021 10:36:24 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-oim [archive: null], to oim_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Disconnected from weblogic server: AdminServer Exiting WebLogic Scripting Tool. \u0026lt;Nov 18, 2021 10:36:27 AM GMT\u0026gt; \u0026lt;Warning\u0026gt; \u0026lt;JNDI\u0026gt; \u0026lt;BEA-050001\u0026gt; \u0026lt;WLContext.close() was called in a different thread than the one in which it was created.\u0026gt; Configure Prometheus Operator Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. To get the WebLogic Monitoring Exporter end point discovered as a target, you must create a service monitor pointing to the service.\nThe exporting of metrics from wls-exporter requires basicAuth, so a Kubernetes Secret is created with the user name and password that are base64 encoded. This Secret is used in the ServiceMonitor deployment. The wls-exporter-ServiceMonitor.yaml has basicAuth with credentials as username: weblogic and password: \u0026lt;password\u0026gt; in base64 encoded.\n Run the following command to get the base64 encoded version of the weblogic password:\n$ echo -n \u0026#34;\u0026lt;password\u0026gt;\u0026#34; | base64 The output will look similar to the following:\nV2VsY29tZTE= Update the $WORKDIR/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml and change the password: value to the value returned above. Also change any reference to the namespace and weblogic.domainName: values to match your OIG namespace and domain name. 
For example:\napiVersion: v1 kind: Secret metadata: name: basic-auth namespace: oigns data: password: V2VsY29tZTE= user: d2VibG9naWM= type: Opaque --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: wls-exporter namespace: oigns labels: k8s-app: wls-exporter release: monitoring spec: namespaceSelector: matchNames: - oigns selector: matchLabels: weblogic.domainName: governancedomain endpoints: - basicAuth: password: name: basic-auth key: password username: name: basic-auth key: user port: default relabelings: - action: labelmap regex: __meta_kubernetes_service_label_(.+) interval: 10s honorLabels: true path: /wls-exporter/metrics Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml and change the namespace to match your OIG namespace. For example:\napiVersion: rbac.authorization.k8s.io/v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: prometheus-k8s namespace: oigns rules: - apiGroups: - \u0026quot;\u0026quot; resources: - services - endpoints - pods verbs: - get - list - watch kind: RoleList Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml and change the namespace` to match your OIG namespace. For example:\napiVersion: rbac.authorization.k8s.io/v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: prometheus-k8s namespace: oigns roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: prometheus-k8s subjects: - kind: ServiceAccount name: prometheus-k8s namespace: monitoring kind: RoleBindingList Run the following command to enable Prometheus:\n$ cd $WORKDIR/kubernetes/monitoring-service/manifests $ kubectl apply -f . The output will look similar to the following:\nrolebinding.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created secret/basic-auth created servicemonitor.monitoring.coreos.com/wls-exporter created Prometheus service discovery After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.\n Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery\n Click on oigns/wls-exporter/0 and then show more. Verify all the targets are mentioned.\n Note: It may take several minutes for oigns/wls-exporter/0 to appear, so refresh the page until it does.\nGrafana dashboard Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.\n Import the Grafana dashboard by navigating on the left hand menu to Create \u0026gt; Import. Copy the content from $WORKDIR/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json and paste. Then click Load and Import. The dashboard should be displayed.\n Cleanup To clean up a manual installation:\n Run the following commands:\n$ cd $WORKDIR/kubernetes/monitoring-service/manifests/ $ kubectl delete -f . 
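This removes the ServiceMonitor, Role, RoleBinding and basic-auth Secret created earlier. As an optional check, assuming the oigns namespace used in the examples above, you can confirm the ServiceMonitor has been removed:\n$ kubectl get servicemonitors -n oigns\nThe wls-exporter ServiceMonitor should no longer be listed. 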
Delete the deployments:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts/ $ kubectl cp undeploy-weblogic-monitoring-exporter.py \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n \u0026lt;domain_namespace\u0026gt; \u0026lt;domain_uid\u0026gt;-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/undeploy-weblogic-monitoring-exporter.py -domainName \u0026lt;domain_uid\u0026gt; -adminServerName AdminServer -adminURL \u0026lt;domain_uid\u0026gt;-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oimClusterName oim_cluster -wlsMonitoringExporterTooimCluster true -soaClusterName soa_cluster -wlsMonitoringExporterTosoaCluster true Delete Prometheus:\n$ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus $ kubectl delete -f manifests $ kubectl delete -f manifests/setup " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/manage-oig-domains/delete-domain-home/", + "title": "Delete the OIG domain home", + "tags": [], + "description": "Learn about the steps to cleanup the OIG domain home.", + "content": "Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script.\n Run the following command to delete the domain:\n$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d \u0026lt;domain_uid\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d governancedomain Drop the RCU schemas as follows:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash [oracle@helper ~]$ [oracle@helper ~]$ export CONNECTION_STRING=\u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt; [oracle@helper ~]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \\ -dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \\ -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \\ -component WLS -component STB -component OIM -component SOAINFRA -component UCSUMS -f \u0026lt; /tmp/pwd.txt For example:\n$ kubectl exec -it helper -n oigns -- /bin/bash [oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com [oracle@helper ~]$ export RCUPREFIX=OIGK8S /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \\ -dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \\ -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \\ -component WLS -component STB -component OIM -component SOAINFRA -component UCSUMS -f \u0026lt; /tmp/pwd.txt Delete the contents of the persistent volume:\n$ rm -rf \u0026lt;persistent_volume\u0026gt;/governancedomainpv/* For example:\n$ rm -rf /scratch/shared/governancedomainpv/* Delete the WebLogic Kubernetes Operator, by running the following command:\n$ helm delete weblogic-kubernetes-operator -n opns Delete the label from the OIG namespace:\n$ kubectl label namespaces \u0026lt;domain_namespace\u0026gt; weblogic-operator- For example:\n$ kubectl label namespaces oigns weblogic-operator- Delete the service account for the operator:\n$ kubectl delete serviceaccount \u0026lt;sample-kubernetes-operator-sa\u0026gt; 
-n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl delete serviceaccount op-sa -n opns Delete the operator namespace:\n$ kubectl delete namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl delete namespace opns To delete NGINX:\n$ helm delete governancedomain-nginx-designconsole -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete governancedomain-nginx-designconsole -n oigns Then run:\n$ helm delete governancedomain-nginx -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete governancedomain-nginx -n oigns Then run:\n$ helm delete nginx-ingress -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete nginx-ingress -n nginxssl Then delete the NGINX namespace:\n$ kubectl delete namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl delete namespace nginxssl Delete the OIG namespace:\n$ kubectl delete namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl delete namespace oigns " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oid/manage-oid-containers/", + "title": "Manage Oracle Internet Directory Containers", + "tags": [], + "description": "This document provides steps to manage Oracle Internet Directory containers.", + "content": "Important considerations for Oracle Internet Directory instances in Kubernetes.\n a) Scaling Up/Down OID Pods Describes the steps for scaling up/down for OID pods.\n c) Monitoring an Oracle Internet Directory Instance Describes the steps for Monitoring the Oracle Internet Directory environment.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oud/manage-oud-containers/", + "title": "Manage Oracle Unified Directory Containers", + "tags": [], + "description": "This document provides steps manage Oracle Unified Directory containers.", + "content": "Important considerations for Oracle Unified Directory instances in Kubernetes.\n a) Scaling Up/Down OUD Pods Describes the steps for scaling up/down for OUD pods.\n b) Logging and Visualization for Helm Chart oud-ds-rs Deployment Describes the steps for logging and visualization with Elasticsearch and Kibana.\n c) Monitoring an Oracle Unified Directory Instance Describes the steps for Monitoring the Oracle Unified Directory environment.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oudsm/manage-oudsm-containers/", + "title": "Manage Oracle Unified Directory Services Manager Containers", + "tags": [], + "description": "This document provides steps to manage Oracle Unified Directory Services Manager containers.", + "content": "Important considerations for Oracle Unified Directory Services Manager instances in Kubernetes.\n a) Scaling Up/Down OUDSM Pods Describes the steps for scaling up/down for OUDSM pods.\n b) Logging and Visualization for Helm Chart oudsm Deployment Describes the steps for logging and visualization with Elasticsearch and Kibana.\n c) Monitoring an Oracle Unified Directory Services Manager Instance Describes the steps for Monitoring the Oracle Unified Directory Services Manager environment.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/validate-domain-urls/", + "title": "Validate Domain URLs", + "tags": [], + "description": "Sample for validating domain urls.", + "content": "In this section you validate the OAM domain URLs are accessible via the NGINX ingress.\nMake sure you know the master hostname and ingress port for NGINX before proceeding.\nValidate the OAM domain urls via the Ingress Launch a browser and access the following URL\u0026rsquo;s. 
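If you want to check connectivity from the command line before using a browser, a simple check (a sketch only; substitute your own hostname and ingress port) is:\n$ curl -k -s -o /dev/null -w %{http_code} https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console\nAn HTTP response code such as 200 or 302 indicates the ingress is routing requests to the domain. 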
Login with the weblogic username and password (weblogic/\u0026lt;password\u0026gt;).\nNote: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.\n Console or Page URL WebLogic Administration Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console Oracle Enterprise Manager Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em Oracle Access Management Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/oamconsole Oracle Access Management Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/access Logout URL https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/oam/server/logout Note: WebLogic Administration Console and Oracle Enterprise Manager Console should only be used to monitor the servers in the OAM domain. To control the Administration Server and OAM Managed Servers (start/stop) you must use Kubernetes. See Domain Life Cycle for more information.\nThe browser will give certificate errors if you used a self signed certificate and have not imported it into the browser\u0026rsquo;s Certificate Authority store. If this occurs you can proceed with the connection and ignore the errors.\nAfter validating the URL\u0026rsquo;s proceed to Post Install Configuration.\n" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/validate-domain-urls/", + "title": "Validate domain URLs", + "tags": [], + "description": "Sample for validating domain urls.", + "content": "In this section you validate that the OIG domain URLs are accessible via the NGINX ingress.\nMake sure you know the master hostname and port before proceeding.\nValidate the OIG domain urls via the ingress Launch a browser and access the following URL\u0026rsquo;s. Use http or https depending on whether you configured your ingress for non-ssl or ssl.\nLogin to the WebLogic Administration Console and Oracle Enterprise Manager Console with the WebLogic username and password (weblogic/\u0026lt;password\u0026gt;).\nLogin to Oracle Identity Governance with the xelsysadm username and password (xelsysadm/\u0026lt;password\u0026gt;).\nNote: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.\n Console or Page URL WebLogic Administration Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console Oracle Enterprise Manager Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em Oracle Identity System Administration https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/sysadmin Oracle Identity Self Service https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/identity Note: WebLogic Administration Console and Oracle Enterprise Manager Console should only be used to monitor the servers in the OIG domain. To control the Administration Server and OIG Managed Servers (start/stop) you must use Kubernetes. See Domain Life Cycle for more information.\nThe browser will give certificate errors if you used a self signed certificate and have not imported it into the browser\u0026rsquo;s Certificate Authority store. 
If this occurs you can proceed with the connection and ignore the errors.\nAfter the URL\u0026rsquo;s have been verified follow Post install configuration.\n" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oid/create-or-update-image/", + "title": "Create or update an image", + "tags": [], + "description": "Create or update an Oracle Internet Directory (OID) container image used for deploying OID domains.", + "content": "As described in Prepare Your Environment you can create your own OID container image. If you have access to My Oracle Support (MOS) and there is a need to build a new image with an interim or one-off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Internet Directory image for production deployments.\nCreate or update an Oracle Internet Directory image using the WebLogic Image Tool Using the WebLogic Image Tool, you can create a new Oracle Internet Directory image with PSU\u0026rsquo;s and interim patches or update an existing image with one or more interim patches.\n Recommendations:\n Use create for creating a new Oracle Internet Directory image containing the Oracle Internet Directory binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OID patches because it optimizes the size of the image. Use update for patching an existing Oracle Internet Directory image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. Create an image Set up the WebLogic Image Tool Prerequisites Set up the WebLogic Image Tool Validate setup WebLogic Image Tool build directory WebLogic Image Tool cache Prerequisites Verify that your environment meets the following prerequisites:\n Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. Bash version 4.0 or later, to enable the command complete feature. JAVA_HOME environment variable set to the appropriate JDK location e.g: /scratch/export/oracle/product/jdk Set up the WebLogic Image Tool To set up the WebLogic Image Tool:\n Create a working directory and change to it:\n$ mkdir \u0026lt;workdir\u0026gt; $ cd \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/imagetool-setup $ cd /scratch/imagetool-setup Download the latest version of the WebLogic Image Tool from the releases page.\n$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip where X.X.X is the latest release referenced on the releases page.\n Unzip the release ZIP file in the imagetool-setup directory.\n$ unzip imagetool.zip Execute the following commands to set up the WebLogic Image Tool:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/bin $ source setup.sh For example:\n$ cd /scratch/imagetool-setup/imagetool/bin $ source setup.sh Validate setup To validate the setup of the WebLogic Image Tool:\n Enter the following command to retrieve the version of the WebLogic Image Tool:\n$ imagetool --version Enter imagetool then press the Tab key to display the available imagetool commands:\n$ imagetool \u0026lt;TAB\u0026gt; cache create help rebase update WebLogic Image Tool build directory The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. 
By default, the WebLogic Image Tool creates the Docker context directory under the user\u0026rsquo;s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:\n$ export WLSIMG_BLDDIR=\u0026#34;/path/to/buid/dir\u0026#34; WebLogic Image Tool cache The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user\u0026rsquo;s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:\n$ export WLSIMG_CACHEDIR=\u0026#34;/path/to/cachedir\u0026#34; Set up additional build scripts Creating an Oracle Internet Directory container image using the WebLogic Image Tool requires additional container scripts for Oracle Internet Directory domains.\n Clone the docker-images repository to set up those scripts. In these steps, this directory is DOCKER_REPO:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup $ git clone https://github.com/oracle/docker-images.git For example:\n$ cd /scratch/imagetool-setup $ git clone https://github.com/oracle/docker-images.git Note: If you want to create the image continue with the following steps, otherwise to update the image see update an image.\n Create an image After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Internet Directory image.\nDownload the Oracle Internet Directory installation binaries and patches You must download the required Oracle Internet Directory installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.\nThe installation binaries and patches required are:\n Oracle Internet Directory 12.2.1.4.0\n fmw_12.2.1.4.0_oid_linux64.bin Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0\n fmw_12.2.1.4.0_infrastructure.jar OID and FMW Infrastructure Patches:\n View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Internet Directory (OID) table. For the latest PSU click the README link in the Documentation column. In the README, locate the \u0026ldquo;Installed Software\u0026rdquo; section. All the patch numbers to be download are listed here. Download all these individual patches from My Oracle Support. Oracle JDK v8\n jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above. 
Update required build files The following files in the code repository location \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleInternetDirectory/imagetool/12.2.1.4.0 are used for creating the image:\n additionalBuildCmds.txt buildArgs Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleInternetDirectory/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%,%JDK_VERSION% and %BUILDTAG% appropriately.\nFor example:\ncreate --jdkVersion=8u321 --type oid_wls --version=12.2.1.4.0 --tag=oid-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleInternetDirectory/dockerfiles/12.2.1.4.0/install/oid.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleInternetDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleInternetDirectory/dockerfiles/12.2.1.4.0/container-scripts The \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/docker-images/OracleInternetDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt contains additional build commands. You may edit this file if you want to customize the image further.\n Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file and under the GENERIC section add the line INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026rdquo;. For example:\n[GENERIC] INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot; DECLINE_SECURITY_UPDATES=true SECURITY_UPDATES_VIA_MYORACLESUPPORT=false Create the image Add a JDK package to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type jdk --version 8uXXX --path \u0026lt;download location\u0026gt;/jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version downloaded\n Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type OID --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_oid_linux64.bin $ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_infrastructure.jar Add the downloaded OPatch patch to the WebLogic Image Tool cache. 
For example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;download location\u0026gt;/p28186730_139428_Generic.zip Add the rest of the downloaded product patches to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key \u0026lt;patch\u0026gt;_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p\u0026lt;patch\u0026gt;_122140_Generic.zip For example:\n$ imagetool cache addEntry --key 33727616_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33727616_122140_Generic.zip $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33093748_122140_Generic.zip $ imagetool cache addEntry --key 32720458_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32720458_122140_Generic.zip $ imagetool cache addEntry --key 33791665_12.2.1.4.220105 --value \u0026lt;download location\u0026gt;/p33791665_12214220105_Generic.zip $ imagetool cache addEntry --key 33723124_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33723124_122140_Generic.zip $ imagetool cache addEntry --key 32647448_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32647448_122140_Linux-x86-64.zip $ imagetool cache addEntry --key 33591019_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33591019_122140_Generic.zip $ imagetool cache addEntry --key 32999272_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32999272_122140_Generic.zip $ imagetool cache addEntry --key 33115009_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33115009_122140_Linux-x86-64.zip $ imagetool cache addEntry --key 33697227_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33697227_122140_Generic.zip $ imagetool cache addEntry --key 33678607_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33678607_122140_Generic.zip $ imagetool cache addEntry --key 33735326_12.2.1.4.220105 --value \u0026lt;download location\u0026gt;/p33735326_12214220105_Generic.zip Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleInternetDirectory/imagetool/12.2.1.4.0/buildArgs file and append the product patches and opatch patch as follows:\n--patches 33727616_12.2.1.4.0,33093748_12.2.1.4.0,32720458_12.2.1.4.0,33791665_12.2.1.4.220105,33723124_12.2.1.4.0,32647448_12.2.1.4.0,33591019_12.2.1.4.0,32999272_12.2.1.4.0,33115009_12.2.1.4.0,33591019_12.2.1.4.0,32999272_12.2.1.4.0,33697227_12.2.1.4.0,33678607_12.2.1.4.0,33735326_12.2.1.4.220105 --opatchBugNumber=28186730_13.9.4.2.8 An example buildArgs file is now as follows:\ncreate --jdkVersion=8u321 --type OID --version=12.2.1.4.0 --tag=oid-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleInternetDirectory/dockerfiles/12.2.1.4.0/install/oid.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleInternetDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleInternetDirectory/dockerfiles/12.2.1.4.0/container-scripts --patches 33727616_12.2.1.4.0,33093748_12.2.1.4.0,32720458_12.2.1.4.0,33791665_12.2.1.4.220105,33723124_12.2.1.4.0,32647448_12.2.1.4.0,33591019_12.2.1.4.0,32999272_12.2.1.4.0,33115009_12.2.1.4.0,33591019_12.2.1.4.0,33697227_12.2.1.4.0,33678607_12.2.1.4.0,33735326_12.2.1.4.220105 --opatchBugNumber=28186730_13.9.4.2.8 Note: In the buildArgs file:\n --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk. 
--version value must match the --version value used in the imagetool cache addInstaller command for --type OID. Refer to this page for the complete list of options available with the WebLogic Image Tool create command.\n Create the Oracle Internet Directory image:\n$ imagetool @\u0026lt;absolute path to buildargs file\u0026gt; --fromImage ghcr.io/oracle/oraclelinux:7-slim Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.\n For example:\n$ imagetool @\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleInternetDirectory/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim Check the created image using the docker images command:\n$ docker images | grep oid The output will look similar to the following:\noid-latestpsu 12.2.1.4.0 f60ca3f0a4dc 4 minutes ago 6.06GB Run the following command to save the container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oid-latestpsu.tar oid-latestpsu:12.2.1.4.0 Update an image The steps below show how to update an existing Oracle Internet Directory image with an interim patch.\nThe container image to be patched must be loaded in the local docker images repository before attempting these steps.\nIn the examples below the image oracle/oid:12.2.1.4.0 is updated with an interim patch.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE oracle/oid 12.2.1.4.0 cd7ece753e52 2 months ago 5.82GB Set up the WebLogic Image Tool.\n Download the required interim patch and latest Opatch (28186730) from My Oracle Support. and save them in a directory of your choice.\n Add the OPatch patch to the WebLogic Image Tool cache, for example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;downloaded-patches-location\u0026gt;/p28186730_139428_Generic.zip Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33735326_12214220105_Generic.zip:\n$ imagetool cache addEntry --key=33735326_12.2.1.4.220105 --value \u0026lt;downloaded-patches-location\u0026gt;/p33735326_12214220105_Generic.zip Provide the following arguments to the WebLogic Image Tool update command:\n –-fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oid:12.2.1.4.0. –-patches - Multiple patches can be specified as a comma-separated list. --tag - Specify the new tag to be applied for the image being built. Refer here for the complete list of options available with the WebLogic Image Tool update command.\n Note: The WebLogic Image Tool cache should have the latest OPatch zip. 
The WebLogic Image Tool will update the OPatch if it is not already updated in the image.\n For example:\n$ imagetool update --fromImage oracle/oid:12.2.1.4.0 --tag=oracle/oid-new:12.2.1.4.0 --patches=33735326_12.2.1.4.220105 --opatchBugNumber=28186730_13.9.4.2.8 Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown \u0026lt;userid\u0026gt;:\u0026lt;groupid\u0026gt; to correspond with the values returned in the error.\n Check the built image using the docker images command:\n$ docker images | grep oid The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oid-new 12.2.1.4.0 e09ae10189db 38 seconds ago 5.9GB oracle/oid 12.2.1.4.0 cd7ece753e52 2 months ago 5.82GB Run the following command to save the patched container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oid-new.tar oracle/oid-new:12.2.1.4.0 " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oud/create-or-update-image/", + "title": "Create or update an image", + "tags": [], + "description": "Create or update an Oracle Unified Directory (OUD) container image used for deploying OUD domains.", + "content": "As described in Prepare Your Environment you can create your own OUD container image. If you have access to the My Oracle Support (MOS), and there is a need to build a new image with an interim or one off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Unified Directory image for production deployments.\nCreate or update an Oracle Unified Directory image using the WebLogic Image Tool Using the WebLogic Image Tool, you can create a new Oracle Unified Directory image with PSU\u0026rsquo;s and interim patches or update an existing image with one or more interim patches.\n Recommendations:\n Use create for creating a new Oracle Unified Directory image containing the Oracle Unified Directory binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OUD patches because it optimizes the size of the image. Use update for patching an existing Oracle Unified Directory image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. Create an image Set up the WebLogic Image Tool Prerequisites Set up the WebLogic Image Tool Validate setup WebLogic Image Tool build directory WebLogic Image Tool cache Prerequisites Verify that your environment meets the following prerequisites:\n Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. Bash version 4.0 or later, to enable the command complete feature. 
JAVA_HOME environment variable set to the appropriate JDK location e.g: /scratch/export/oracle/product/jdk Set up the WebLogic Image Tool To set up the WebLogic Image Tool:\n Create a working directory and change to it:\n$ mdir \u0026lt;workdir\u0026gt; $ cd \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/imagetool-setup $ cd /scratch/imagetool-setup Download the latest version of the WebLogic Image Tool from the releases page.\n$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip where X.X.X is the latest release referenced on the releases page.\n Unzip the release ZIP file in the imagetool-setup directory.\n$ unzip imagetool.zip Execute the following commands to set up the WebLogic Image Tool:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/bin $ source setup.sh For example:\n$ cd /scratch/imagetool-setup/imagetool/bin $ source setup.sh Validate setup To validate the setup of the WebLogic Image Tool:\n Enter the following command to retrieve the version of the WebLogic Image Tool:\n$ imagetool --version Enter imagetool then press the Tab key to display the available imagetool commands:\n$ imagetool \u0026lt;TAB\u0026gt; cache create help rebase update WebLogic Image Tool build directory The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user\u0026rsquo;s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:\n$ export WLSIMG_BLDDIR=\u0026#34;/path/to/buid/dir\u0026#34; WebLogic Image Tool cache The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user\u0026rsquo;s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:\n$ export WLSIMG_CACHEDIR=\u0026#34;/path/to/cachedir\u0026#34; Set up additional build scripts Creating an Oracle Unified Directory container image using the WebLogic Image Tool requires additional container scripts for Oracle Unified Directory domains.\n Clone the docker-images repository to set up those scripts. 
In these steps, this directory is DOCKER_REPO:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup $ git clone https://github.com/oracle/docker-images.git For example:\n$ cd /scratch/imagetool-setup $ git clone https://github.com/oracle/docker-images.git Note: If you want to create the image continue with the following steps, otherwise to update the image see update an image.\n Create an image After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Unified Directory image.\nDownload the Oracle Unified Directory installation binaries and patches You must download the required Oracle Unified Directory installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.\nThe installation binaries and patches required are:\n Oracle Unified Directory 12.2.1.4.0\n fmw_12.2.1.4.0_oud.jar OUD Patches:\n View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Unified Directory (OUD) table. For the latest PSU click the README link in the Documentation column. In the README, locate the \u0026ldquo;Installed Software\u0026rdquo; section. All the patch numbers to be download are listed here. Download all these individual patches from My Oracle Support. Oracle JDK v8\n jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above. Update required build files The following files in the code repository location \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0 are used for creating the image:\n additionalBuildCmds.txt buildArgs Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%,%JDK_VERSION% and %BUILDTAG% appropriately.\nFor example:\ncreate --jdkVersion=8u321 --type oud --version=12.2.1.4.0 --tag=oud-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/install/oud.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/container-scripts The \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt contains additional build commands. You may edit this file if you want to customize the image further.\n Create the image Add a JDK package to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type jdk --version 8uXXX --path \u0026lt;download location\u0026gt;/jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version downloaded\n Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type OUD --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_oud.jar Add the downloaded OPatch patch to the WebLogic Image Tool cache. 
For example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;download location\u0026gt;/p28186730_139428_Generic.zip Add the rest of the downloaded product patches to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key \u0026lt;patch\u0026gt;_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p\u0026lt;patch\u0026gt;_122140_Generic.zip For example:\n$ imagetool cache addEntry --key 33448950_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33448950_122140_Generic.zip Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/buildArgs file and append the product patches and opatch patch as follows:\n--patches 33448950_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.8 An example buildArgs file is now as follows:\ncreate --jdkVersion=8u321 --type oud --version=12.2.1.4.0 --tag=oud-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/install/oud.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectory/dockerfiles/12.2.1.4.0/container-scripts --patches 33448950_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.8 Note: In the buildArgs file:\n --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk. --version value must match the --version value used in the imagetool cache addInstaller command for --type OUD. Refer to this page for the complete list of options available with the WebLogic Image Tool create command.\n Create the Oracle Unified Directory image:\n$ imagetool @\u0026lt;absolute path to buildargs file\u0026gt; --fromImage ghcr.io/oracle/oraclelinux:7-slim Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.\n For example:\n$ imagetool @\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleUnifiedDirectory/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim Check the created image using the docker images command:\n$ docker images | grep oud The output will look similar to the following:\noud-latestpsu 12.2.1.4.0 30b02a692fa3 About a minute ago 1.04GB Run the following command to save the container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oud-latestpsu.tar oud-latestpsu:12.2.1.4.0 Update an image The steps below show how to update an existing Oracle Unified Directory image with an interim patch.\nThe container image to be patched must be loaded in the local docker images repository before attempting these steps.\nIn the examples below the image oracle/oud:12.2.1.4.0 is updated with an interim patch.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE oracle/oud 12.2.1.4.0 b051804ba15f 3 months ago 1.04GB Set up the WebLogic Image Tool.\n Download the required interim patch and latest Opatch (28186730) from My Oracle Support. 
and save them in a directory of your choice.\n Add the OPatch patch to the WebLogic Image Tool cache, for example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;downloaded-patches-location\u0026gt;/p28186730_139428_Generic.zip Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33521773_12214211008_Generic.zip:\n$ imagetool cache addEntry --key=33521773_12.2.1.4.211008 --value \u0026lt;downloaded-patches-location\u0026gt;/p33521773_12214211008_Generic.zip Provide the following arguments to the WebLogic Image Tool update command:\n --fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oud:12.2.1.4.0. --patches - Multiple patches can be specified as a comma-separated list. --tag - Specify the new tag to be applied for the image being built. Refer here for the complete list of options available with the WebLogic Image Tool update command.\n Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.\n For example:\n$ imagetool update --fromImage oracle/oud:12.2.1.4.0 --tag=oracle/oud-new:12.2.1.4.0 --patches=33521773_12.2.1.4.211008 --opatchBugNumber=28186730_13.9.4.2.8 Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown \u0026lt;userid\u0026gt;:\u0026lt;groupid\u0026gt; to correspond with the values returned in the error.\n Check the built image using the docker images command:\n$ docker images | grep oud The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oud-new 12.2.1.4.0 78ccd1ad67eb 5 minutes ago 1.11GB oracle/oud 12.2.1.4.0 b051804ba15f 3 months ago 1.04GB Run the following command to save the patched container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oud-new.tar oracle/oud-new:12.2.1.4.0 " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oudsm/create-or-update-image/", + "title": "Create or update an image", + "tags": [], + "description": "Create or update an Oracle Unified Directory Services Manager (OUDSM) container image used for deploying OUDSM domains.", + "content": "As described in Prepare Your Environment you can create your own OUDSM container image. If you have access to My Oracle Support (MOS) and there is a need to build a new image with an interim or one-off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Unified Directory Services Manager image for production deployments.\nCreate or update an Oracle Unified Directory Services Manager image using the WebLogic Image Tool Using the WebLogic Image Tool, you can create a new Oracle Unified Directory Services Manager image with PSU\u0026rsquo;s and interim patches or update an existing image with one or more interim patches.\n Recommendations:\n Use create for creating a new Oracle Unified Directory Services Manager image containing the Oracle Unified Directory Services Manager binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OUDSM patches because it optimizes the size of the image. Use update for patching an existing Oracle Unified Directory Services Manager image with a single interim patch. 
Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. Create an image Set up the WebLogic Image Tool Prerequisites Set up the WebLogic Image Tool Validate setup WebLogic Image Tool build directory WebLogic Image Tool cache Prerequisites Verify that your environment meets the following prerequisites:\n Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. Bash version 4.0 or later, to enable the command complete feature. JAVA_HOME environment variable set to the appropriate JDK location e.g: /scratch/export/oracle/product/jdk Set up the WebLogic Image Tool To set up the WebLogic Image Tool:\n Create a working directory and change to it:\n$ mdir \u0026lt;workdir\u0026gt; $ cd \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/imagetool-setup $ cd /scratch/imagetool-setup Download the latest version of the WebLogic Image Tool from the releases page.\n$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip where X.X.X is the latest release referenced on the releases page.\n Unzip the release ZIP file in the imagetool-setup directory.\n$ unzip imagetool.zip Execute the following commands to set up the WebLogic Image Tool:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/bin $ source setup.sh For example:\n$ cd /scratch/imagetool-setup/imagetool/bin $ source setup.sh Validate setup To validate the setup of the WebLogic Image Tool:\n Enter the following command to retrieve the version of the WebLogic Image Tool:\n$ imagetool --version Enter imagetool then press the Tab key to display the available imagetool commands:\n$ imagetool \u0026lt;TAB\u0026gt; cache create help rebase update WebLogic Image Tool build directory The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user\u0026rsquo;s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:\n$ export WLSIMG_BLDDIR=\u0026#34;/path/to/buid/dir\u0026#34; WebLogic Image Tool cache The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user\u0026rsquo;s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:\n$ export WLSIMG_CACHEDIR=\u0026#34;/path/to/cachedir\u0026#34; Set up additional build scripts Creating an Oracle Unified Directory Services Manager container image using the WebLogic Image Tool requires additional container scripts for Oracle Unified Directory Services Manager domains.\n Clone the docker-images repository to set up those scripts. 
In these steps, this directory is DOCKER_REPO:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup $ git clone https://github.com/oracle/docker-images.git For example:\n$ cd /scratch/imagetool-setup $ git clone https://github.com/oracle/docker-images.git Note: If you want to create the image continue with the following steps, otherwise to update the image see update an image.\n Create an image After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Unified Directory Services Manager image.\nDownload the Oracle Unified Directory Services Manager installation binaries and patches You must download the required Oracle Unified Directory Services Manager installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.\nThe installation binaries and patches required are:\n Oracle Unified Directory 12.2.1.4.0\n fmw_12.2.1.4.0_oud.jar Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0\n fmw_12.2.1.4.0_infrastructure.jar OUDSM and FMW Infrastructure Patches:\n View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Unified Directory Services Manager (OUDSM) table. For the latest PSU click the README link in the Documentation column. In the README, locate the \u0026ldquo;Installed Software\u0026rdquo; section. All the patch numbers to be download are listed here. Download all these individual patches from My Oracle Support. Oracle JDK v8\n jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above. Update required build files The following files in the code repository location \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0 are used for creating the image:\n additionalBuildCmds.txt buildArgs Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%,%JDK_VERSION% and %BUILDTAG% appropriately.\nFor example:\ncreate --jdkVersion=8u321 --type oud_wls --version=12.2.1.4.0 --tag=oudsm-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/install/oud.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/container-scripts The \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/additionalBuildCmds.txt contains additional build commands. You may edit this file if you want to customize the image further.\n Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file and under the GENERIC section add the line INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026rdquo;. For example:\n[GENERIC] INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot; DECLINE_SECURITY_UPDATES=true SECURITY_UPDATES_VIA_MYORACLESUPPORT=false Create the image Add a JDK package to the WebLogic Image Tool cache. 
For example:\n$ imagetool cache addInstaller --type jdk --version 8uXXX --path \u0026lt;download location\u0026gt;/jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version downloaded\n Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type OUD --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_oud.jar $ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_infrastructure.jar Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;download location\u0026gt;/p28186730_139428_Generic.zip Add the rest of the downloaded product patches to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key \u0026lt;patch\u0026gt;_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p\u0026lt;patch\u0026gt;_122140_Generic.zip For example:\n$ imagetool cache addEntry --key 33727616_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33727616_122140_Generic.zip $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33093748_122140_Generic.zip $ imagetool cache addEntry --key 32720458_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32720458_122140_Generic.zip $ imagetool cache addEntry --key 33791665_12.2.1.4.220105 --value \u0026lt;download location\u0026gt;/p33791665_12214220105_Generic.zip $ imagetool cache addEntry --key 33723124_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33723124_122140_Generic.zip $ imagetool cache addEntry --key 32647448_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32647448_122140_Linux-x86-64.zip $ imagetool cache addEntry --key 33591019_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33591019_122140_Generic.zip $ imagetool cache addEntry --key 32999272_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32999272_122140_Generic.zip $ imagetool cache addEntry --key 33448950_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33448950_122140_Generic.zip $ imagetool cache addEntry --key 33697227_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33697227_122140_Generic.zip $ imagetool cache addEntry --key 33678607_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33678607_122140_Generic.zip $ imagetool cache addEntry --key 33735326_12.2.1.4.220105 --value \u0026lt;download location\u0026gt;/p33735326_12214220105_Generic.zip Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/buildArgs file and append the product patches and opatch patch as follows:\n--patches 33727616_12.2.1.4.0,33093748_12.2.1.4.0,32720458_12.2.1.4.0,33791665_12.2.1.4.220105,33723124_12.2.1.4.0,32647448_12.2.1.4.0,33591019_12.2.1.4.0,32999272_12.2.1.4.0,33448950_12.2.1.4.0,32999272_12.2.1.4.0,33448950_12.2.1.4.0,33697227_12.2.1.4.0,33678607_12.2.1.4.0,33735326_12.2.1.4.220105 --opatchBugNumber=28186730_13.9.4.2.8 An example buildArgs file is now as follows:\ncreate --jdkVersion=8u321 --type oud_wls --version=12.2.1.4.0 --tag=oudsm-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/install/oud.response --additionalBuildCommands 
/scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleUnifiedDirectorySM/dockerfiles/12.2.1.4.0/container-scripts --patches 33727616_12.2.1.4.0,33093748_12.2.1.4.0,32720458_12.2.1.4.0,33791665_12.2.1.4.220105,33723124_12.2.1.4.0,32647448_12.2.1.4.0,33591019_12.2.1.4.0,32999272_12.2.1.4.0,33448950_12.2.1.4.0,33448950_12.2.1.4.0,33697227_12.2.1.4.0,33678607_12.2.1.4.0,33735326_12.2.1.4.220105 --opatchBugNumber=28186730_13.9.4.2.8 Note: In the buildArgs file:\n --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk. --version value must match the --version value used in the imagetool cache addInstaller command for --type OUDSM. Refer to this page for the complete list of options available with the WebLogic Image Tool create command.\n Create the Oracle Unified Directory Services Manager image:\n$ imagetool @\u0026lt;absolute path to buildargs file\u0026gt; --fromImage ghcr.io/oracle/oraclelinux:7-slim Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.\n For example:\n$ imagetool @\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleUnifiedDirectorySM/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim Check the created image using the docker images command:\n$ docker images | grep oudsm The output will look similar to the following:\noudsm-latestpsu 12.2.1.4.0 f6dd9d2ca0e6 4 minutes ago 3.72GB Run the following command to save the container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oudsm-latestpsu.tar oudsm-latestpsu:12.2.1.4.0 Update an image The steps below show how to update an existing Oracle Unified Directory Services Manager image with an interim patch.\nThe container image to be patched must be loaded in the local docker images repository before attempting these steps.\nIn the examples below the image oracle/oudsm:12.2.1.4.0 is updated with an interim patch.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE oracle/oudsm 12.2.1.4.0 b051804ba15f 3 months ago 3.72GB Set up the WebLogic Image Tool.\n Download the required interim patch and latest Opatch (28186730) from My Oracle Support. and save them in a directory of your choice.\n Add the OPatch patch to the WebLogic Image Tool cache, for example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;downloaded-patches-location\u0026gt;/p28186730_139428_Generic.zip Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33521773_12214211008_Generic.zip:\n$ imagetool cache addEntry --key=33521773_12.2.1.4.211008 --value \u0026lt;downloaded-patches-location\u0026gt;/p33521773_12214211008_Generic.zip Provide the following arguments to the WebLogic Image Tool update command:\n –-fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oudsm:12.2.1.4.0. –-patches - Multiple patches can be specified as a comma-separated list. --tag - Specify the new tag to be applied for the image being built. Refer here for the complete list of options available with the WebLogic Image Tool update command.\n Note: The WebLogic Image Tool cache should have the latest OPatch zip. 
The WebLogic Image Tool will update the OPatch if it is not already updated in the image.\n For example:\n$ imagetool update --fromImage oracle/oudsm:12.2.1.4.0 --tag=oracle/oudsm-new:12.2.1.4.0 --patches=33521773_12.2.1.4.211008 --opatchBugNumber=28186730_13.9.4.2.8 Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown \u0026lt;userid\u0026gt;:\u0026lt;groupid\u0026gt; to correspond with the values returned in the error.\n Check the built image using the docker images command:\n$ docker images | grep oudsm The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oudsm-new 12.2.1.4.0 78ccd1ad67eb 5 minutes ago 1.11GB oracle/oudsm 12.2.1.4.0 b051804ba15f 3 months ago 1.04GB Run the following command to save the patched container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oudsm-new.tar oracle/oudsm-new:12.2.1.4.0 " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/post-install-config/", + "title": "Post Install Configuration", + "tags": [], + "description": "Post install configuration.", + "content": "Follow these post install configuration steps.\n Create a Server Overrides File Removing OAM Server from WebLogic Server 12c Default Coherence Cluster WebLogic Server Tuning Enable Virtualization Modify oamconfig.properties Create a Server Overrides File Navigate to the following directory:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/output/weblogic-domains/accessdomain Create a setUserOverrides.sh with the following contents:\nDERBY_FLAG=false JAVA_OPTIONS=\u0026quot;${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true\u0026quot; MEM_ARGS=\u0026quot;-Xms8192m -Xmx8192m\u0026quot; Copy the setUserOverrides.sh file to the Administration Server pod:\n$ chmod 755 setUserOverrides.sh $ kubectl cp setUserOverrides.sh oamns/accessdomain-adminserver:/u01/oracle/user_projects/domains/accessdomain/bin/setUserOverrides.sh Where oamns is the OAM namespace and accessdomain is the DOMAIN_NAME/UID.\n Stop the OAM domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NEVER\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oamns patch domains accessdomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NEVER\u0026#34; }]\u0026#39; The output will look similar to the following:\ndomain.weblogic.oracle/accessdomain patched Check that all the pods are stopped:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Terminating 0 27m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h29m accessdomain-oam-policy-mgr1 1/1 Terminating 0 24m accessdomain-oam-policy-mgr2 1/1 Terminating 0 24m accessdomain-oam-server1 1/1 Terminating 0 24m accessdomain-oam-server2 1/1 Terminating 0 24m helper 1/1 Running 0 4h44m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 108m The 
Administration Server pods and Managed Server pods will move to a STATUS of Terminating. After a few minutes, run the command again and the pods should have disappeared:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m helper 1/1 Running 0 4h45m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 109m Start the domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IF_NEEDED\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oamns patch domains accessdomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IF_NEEDED\u0026#34; }]\u0026#39; Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h30m accessdomain-introspector-mckp2 1/1 Running 0 8s helper 1/1 Running 0 4h46m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 110m The Administration Server pod will start followed by the OAM Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 5m38s accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h37m accessdomain-oam-policy-mgr1 1/1 Running 0 2m51s accessdomain-oam-policy-mgr2 1/1 Running 0 2m51s accessdomain-oam-server1 1/1 Running 0 2m50s accessdomain-oam-server2 1/1 Running 0 2m50s helper 1/1 Running 0 4h52m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 116m Removing OAM Server from WebLogic Server 12c Default Coherence Cluster Exclude all Oracle Access Management (OAM) clusters (including Policy Manager and OAM runtime server) from the default WebLogic Server 12c coherence cluster by using the WebLogic Server Administration Console.\nFrom 12.2.1.3.0 onwards, OAM server-side session management uses the database and does not require coherence cluster to be established. In some environments, warnings and errors are observed due to default coherence cluster initialized by WebLogic. To avoid or fix these errors, exclude all of the OAM clusters from default WebLogic Server coherence cluster using the following steps:\n Login to the WebLogic Server Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console. Click Lock \u0026amp; Edit. In Domain Structure, expand Environment and select Coherence Clusters. Click defaultCoherenceCluster and select the Members tab. From Servers and Clusters, deselect all OAM clusters (oam_cluster and policy_cluster). Click Save. Click Activate changes. WebLogic Server Tuning For production environments, the following WebLogic Server tuning parameters must be set:\nAdd Minimum Thread constraint to worker manager \u0026ldquo;OAPOverRestWM\u0026rdquo; Login to the WebLogic Server Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console. Click Lock \u0026amp; Edit. 
In Domain Structure, click Deployments. On the Deployments page click Next until you see oam_server. Expand oam_server by clicking on the + icon, then click /iam/access/binding. Click the Configuration tab, followed by the Workload tab. Click wm/OAPOverRestWM. Under Application Scoped Work Managed Components, click New. In Create a New Work Manager Component, select Minimum Threads Constraint and click Next. In Minimum Threads Constraint Properties enter the Count as 400 and click Finish. In the Save Deployment Plan change the Path to the value /u01/oracle/user_projects/domains/accessdomain/Plan.xml, where accessdomain is your domain_UID. Click OK and then Activate Changes. Remove Max Thread Constraint and Capacity Constraint Repeat steps 1-7 above. Under Application Scoped Work Managed Components select the check box for Capacity and MaxThreadsCount. Click Delete. In the Delete Work Managed Components screen, click OK to delete. Click on Release Configuration and then Log Out. oamDS DataSource Tuning Login to the WebLogic Server Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console. Click Lock \u0026amp; Edit. In Domain Structure, expand Services and click Data Sources. Click on oamDS. In Settings for oamDS, select the Configuration tab, and then the Connection Pool tab. Change Initial Capacity, Maximum Capacity, and Minimum Capacity to 800 and click Save. Click Activate Changes. Enable Virtualization Log in to Oracle Enterprise Manager Fusion Middleware Control at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em. Click WebLogic Domain \u0026gt; Security \u0026gt; Security Provider Configuration. Expand Security Store Provider. Expand Identity Store Provider. Click Configure. Add a custom property. Select virtualize property with value true and click OK. Click OK again to persist the change. Modify oamconfig.properties Navigate to the following directory and change the permissions for oamconfig_modify.sh:\n$ cd $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/common $ chmod 777 oamconfig_modify.sh Edit the oamconfig.properties file and change the OAM_NAMESPACE and LBR_HOST to match the values for your OAM Kubernetes environment. For example:\n#Below are only the sample values, please modify them as per your setup # The name space where OAM servers are created OAM_NAMESPACE='oamns' # Define the INGRESS CONTROLLER used. INGRESS=\u0026quot;nginx\u0026quot; # Define the INGRESS CONTROLLER name used during installation. 
INGRESS_NAME=\u0026quot;nginx-ingress\u0026quot; # FQDN of the LBR Host i.e the host from where you access oam console LBR_HOST=\u0026quot;masternode.example.com\u0026quot; Run the oamconfig_modify.sh script as follows:\n$ ./oamconfig_modify.sh \u0026lt;OAM_ADMIN_USER\u0026gt;:\u0026lt;OAM_ADMIN_PASSWORD\u0026gt; where:\nOAM_ADMIN_USER is the OAM administrator username\nOAM_ADMIN_PASSWORD is the OAM administrator password\nFor example:\n$ ./oamconfig_modify.sh weblogic:\u0026lt;password\u0026gt; Note: Make sure port 30540 is free before running the command.\nThe output will look similar to the following:\nLBR_PROTOCOL: https domainUID: accessdomain OAM_SERVER: accessdomain-oam-server OAM_NAMESPACE: oamns INGRESS: nginx INGRESS_NAME: nginx-ingress ING_TYPE : NodePort LBR_HOST: masternode.example.com LBR_PORT: 31051 Started Executing Command % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 764k 0 764k 0 0 221k 0 --:--:-- 0:00:03 --:--:-- 221k new_cluster_id: a52fc-masternode service/accessdomain-oamoap-service created accessdomain-oamoap-service NodePort 10.100.202.44 \u0026lt;none\u0026gt; 5575:30540/TCP 1s nginx-ingress-ingress-nginx-controller NodePort 10.101.132.251 \u0026lt;none\u0026gt; 80:32371/TCP,443:31051/TCP 144m HTTP/1.1 100 Continue HTTP/1.1 201 Created Date: Mon, 01 Nov 2021 16:59:12 GMT Content-Type: text/plain Content-Length: 76 Connection: keep-alive X-ORACLE-DMS-ECID: 9234b1a0-83b4-4100-9875-aa00e3f5db27-0000035f X-ORACLE-DMS-RID: 0 Set-Cookie: JSESSIONID=pSXccMR6t8B5QoyaAlOuZYSmhtseX4C4jx-0tnkmNyer8L1mOLET!402058795; path=/; HttpOnly Set-Cookie: _WL_AUTHCOOKIE_JSESSIONID=X1iqH-mtDNGyFx5ZCXMK; path=/; secure; HttpOnly Strict-Transport-Security: max-age=15724800; includeSubDomains https://masternode.example.com:31051/iam/admin/config/api/v1/config?path=%2F $WORKDIR/kubernetes/create-access-domain/domain-home-on-pv/common/output/oamconfig_modify.xml executed successfully --------------------------------------------------------------------------- Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Location changed to domainRuntime tree. This is a read-only tree with DomainMBean as the root MBean. For more help, use help('domainRuntime') Exiting WebLogic Scripting Tool. Please wait for some time for the server to restart pod \u0026quot;accessdomain-oam-server1\u0026quot; deleted pod \u0026quot;accessdomain-oam-server2\u0026quot; deleted Waiting continuously at an interval of 10 secs for servers to start.. Waiting continuously at an interval of 10 secs for servers to start.. Waiting continuously at an interval of 10 secs for servers to start.. Waiting continuously at an interval of 10 secs for servers to start.. Waiting continuously at an interval of 10 secs for servers to start.. ... Waiting continuously at an interval of 10 secs for servers to start.. Waiting continuously at an interval of 10 secs for servers to start.. 
accessdomain-oam-server1 1/1 Running 0 4m37s accessdomain-oam-server2 1/1 Running 0 4m36s OAM servers started successfully The script will delete the accessdomain-oam-server1 and accessdomain-oam-server2 pods and then create new ones. Check the pods are running again by issuing the following command:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 43m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 5h14m accessdomain-oam-policy-mgr1 1/1 Running 0 40m accessdomain-oam-policy-mgr2 1/1 Running 0 40m accessdomain-oam-server1 0/1 Running 0 8m3s accessdomain-oam-server2 0/1 Running 0 8m2s helper 0/1 Running 0 5h29m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 154m The accessdomain-oam-server1 and accessdomain-oam-server2 are started, but currently have a READY status of 0/1. This means oam_server1 and oam_server2 are not currently running but are in the process of starting. The servers will take several minutes to start so keep executing the command until READY shows 1/1:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 49m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 5h21m accessdomain-oam-policy-mgr1 1/1 Running 0 46m accessdomain-oam-policy-mgr2 1/1 Running 0 46m accessdomain-oam-server1 1/1 Running 0 14m accessdomain-oam-server2 1/1 Running 0 14m helper 1/1 Running 0 5h36m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 160m " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/post-install-config/", + "title": "Post install configuration", + "tags": [], + "description": "Post install configuration.", + "content": "Follow these post install configuration steps.\n a. Post Install Tasks Perform post install tasks.\n b. Install and configure connectors Install and Configure Connectors.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/configure-design-console/", + "title": "Configure Design Console", + "tags": [], + "description": "Configure Design Console.", + "content": "Configure an Ingress to allow Design Console to connect to your Kubernetes cluster.\n a. Using Design Console with NGINX(non-SSL) Configure Design Console with NGINX(non-SSL).\n b. 
Using Design Console with NGINX(SSL) Configure Design Console with NGINX(SSL).\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oid/patch-and-upgrade/", + "title": "Patch and Upgrade", + "tags": [], + "description": "This document provides steps to patch or upgrade an OID image", + "content": "Introduction In this section the Oracle Internet Directory (OID) deployment is updated with a new OID container image.\nNote: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.\nYou can update the deployment with a new OID container image using one of the following methods:\n Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create an oid-patch-override.yaml file that contains:\nimage: repository: \u0026lt;image_location\u0026gt; tag: \u0026lt;image_tag\u0026gt; imagePullSecrets: - name: orclcred For example:\nimage: repository: container-registry.oracle.com/middleware/oid_cpu tag: 12.2.1.4-jdk8-ol7-new imagePullSecrets: - name: orclcred The following caveats exist:\n If you are not using Oracle Container Registry or your own container registry for your OID container image, then you can remove the following:\nimagePullSecrets: - name: orclcred Run the following command to upgrade the deployment:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --values oid-patch-override.yaml \\ \u0026lt;release_name\u0026gt; oid --reuse-values For example:\n$ helm upgrade --namespace oidns \\ --values oid-patch-override.yaml \\ oid oid --reuse-values Using --set argument Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Run the following command to update the deployment with a new OID container image:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --set image.repository=\u0026lt;image_location\u0026gt;,image.tag=\u0026lt;image_tag\u0026gt; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ \u0026lt;release_name\u0026gt; oid --reuse-values For example:\n$ helm upgrade --namespace oidns \\ --set image.repository=container-registry.oracle.com/middleware/oid_cpu,image.tag=12.2.1.4-jdk8-ol7-new \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ oid oid --reuse-values The following caveats exist:\n If you are not using Oracle Container Registry or your own container registry for your OID container image, then you can remove the following: --set imagePullSecrets[0].name=\u0026quot;orclcred\u0026quot;. Verify the pods After updating with the new image the pods will restart. Verify the pods are running:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pods -o wide For example:\n$ kubectl --namespace oidns get pods -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oidhost1 1/1 Running 0 45m 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oidhost2 1/1 Running 0 45m 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; Note: It will take several minutes before the pods start. While the oid pods have a READY status of 0/1, the pods are started but the OID server associated with each pod is still starting. 
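If you want to monitor the pods while they restart (an optional step, not part of the original instructions), you can watch them until the READY column shows 1/1, for example:\n$ kubectl --namespace oidns get pods -w 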
While the pods are starting you can check the startup status in the pod logs by running the following command:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; Verify the pods are using the new image by running the following command:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe pod oid-0 -n oidns The output will look similar to the following:\nName: oid-0 Namespace: oidns Priority: 0 Node: \u0026lt;Worker Node\u0026gt;/100.102.48.28 Start Time: Wed, 16 Mar 2022 12:07:36 +0000 Labels: app.kubernetes.io/instance=oid app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oid app.kubernetes.io/version=12.2.1.4.0 helm.sh/chart=oid-0.1 oid/instance=oid-0 Annotations: meta.helm.sh/release-name: oid meta.helm.sh/release-namespace: oidns Status: Running IP: 10.244.1.44 etc... Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Killing 4m26s kubelet Container oid definition changed, will be restarted Warning Unhealthy 3m56s kubelet Readiness probe failed: Normal Pulling 3m56s kubelet Pulling image \u0026#34;container-registry.oracle.com/middleware/oid_cpu:12.2.1.4-jdk8-ol7-new\u0026#34; Warning Unhealthy 3m27s kubelet Liveness probe failed: dial tcp 10.244.1.44:1389: connect: connection refused Normal Created 3m22s (x2 over 142m) kubelet Created container oid Normal Started 3m22s (x2 over 142m) kubelet Started container oid Normal Pulled 3m22s kubelet Successfully pulled image \u0026#34;container-registry.oracle.com/middleware/oid_cpu:12.2.1.4-jdk8-ol7-new\u0026#34; in 33.477063844s " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oud/patch-and-upgrade/", + "title": "Patch and Upgrade", + "tags": [], + "description": "This document provides steps to patch or upgrade an OUD image", + "content": "Introduction In this section the Oracle Unified Directory (OUD) deployment is updated with a new OUD container image.\nNote: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.\nYou can update the deployment with a new OUD container image using one of the following methods:\n Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create an oud-patch-override.yaml file that contains:\nimage: repository: \u0026lt;image_location\u0026gt; tag: \u0026lt;image_tag\u0026gt; imagePullSecrets: - name: orclcred For example:\nimage: repository: container-registry.oracle.com/middleware/oud_cpu tag: 12.2.1.4-jdk8-ol7-new imagePullSecrets: - name: orclcred The following caveats exist:\n If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following:\nimagePullSecrets: - name: orclcred Run the following command to upgrade the deployment:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --values oud-patch-override.yaml \\ \u0026lt;release_name\u0026gt; oud-ds-rs --reuse-values For example:\n$ helm upgrade --namespace oudns \\ --values oud-patch-override.yaml \\ oud-ds-rs oud-ds-rs --reuse-values Using --set argument Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Run the following command to update the deployment with a new OUD container image:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --set image.repository=\u0026lt;image_location\u0026gt;,image.tag=\u0026lt;image_tag\u0026gt; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ 
\u0026lt;release_name\u0026gt; oud-ds-rs --reuse-values For example:\n$ helm upgrade --namespace oudns \\ --set image.repository=container-registry.oracle.com/middleware/oud_cpu,image.tag=12.2.1.4-jdk8-ol7-new \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ oud-ds-rs oud-ds-rs --reuse-values The following caveats exist:\n If you are not using Oracle Container Registry or your own container registry for your OUD container image, then you can remove the following: --set imagePullSecrets[0].name=\u0026quot;orclcred\u0026quot;. Verify the pods After updating with the new image the pods will restart. Verify the pods are running:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pods For example:\n$ kubectl --namespace oudns get pods The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 45m 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 45m 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 1/1 Running 0 45m 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; Note: It will take several minutes before the pods start. While the oud-ds-rs pods have a READY status of 0/1, the pods are started but the OUD server associated with each pod is still starting. While the pods are starting you can check the startup status in the pod logs by running the following command:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; Verify the pods are using the new image by running the following command:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe pod oud-ds-rs-0 -n oudns The output will look similar to the following:\nName: oud-ds-rs-0 Namespace: oudns Priority: 0 Node: \u0026lt;Worker Node\u0026gt;/100.102.48.28 Start Time: Wed, 16 Mar 2022 12:07:36 +0000 Labels: app.kubernetes.io/instance=oud-ds-rs app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oud-ds-rs app.kubernetes.io/version=12.2.1.4.0 helm.sh/chart=oud-ds-rs-0.1 oud/instance=oud-ds-rs-0 Annotations: meta.helm.sh/release-name: oud-ds-rs meta.helm.sh/release-namespace: oudns Status: Running IP: 10.244.1.44 etc... 
Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Killing 4m26s kubelet Container oud-ds-rs definition changed, will be restarted Warning Unhealthy 3m56s kubelet Readiness probe failed: Normal Pulling 3m56s kubelet Pulling image \u0026#34;container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7-new\u0026#34; Warning Unhealthy 3m27s kubelet Liveness probe failed: dial tcp 10.244.1.44:1389: connect: connection refused Normal Created 3m22s (x2 over 142m) kubelet Created container oud-ds-rs Normal Started 3m22s (x2 over 142m) kubelet Started container oud-ds-rs Normal Pulled 3m22s kubelet Successfully pulled image \u0026#34;container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7-new\u0026#34; in 33.477063844s " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oudsm/patch-and-upgrade/", + "title": "Patch and Upgrade", + "tags": [], + "description": "This document provides steps to patch or upgrade an OUDSM image", + "content": "Introduction In this section the Oracle Unified Directory Services Manager (OUDSM) deployment is updated with a new OUDSM container image.\nNote: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.\nYou can update the deployment with a new OUDSM container image using one of the following methods:\n Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create a oudsm-patch-override.yaml file that contains:\nimage: repository: \u0026lt;image_location\u0026gt; tag: \u0026lt;image_tag\u0026gt; imagePullSecrets: - name: orclcred For example:\nimage: repository: container-registry.oracle.com/middleware/oudsm_cpu tag: 12.2.1.4-jdk8-ol7-new imagePullSecrets: - name: orclcred The following caveats exist:\n If you are not using Oracle Container Registry or your own container registry for your oudsm container image, then you can remove the following:\nimagePullSecrets: - name: orclcred Run the following command to upgrade the deployment:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --values oudsm-patch-override.yaml \\ \u0026lt;release_name\u0026gt; oudsm --reuse-values For example:\n$ helm upgrade --namespace oudsmns \\ --values oudsm-patch-override.yaml \\ oudsm oudsm --reuse-values Using --set argument Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Run the following command to update the deployment with a new OUDSM container image:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --set image.repository=\u0026lt;image_location\u0026gt;,image.tag=\u0026lt;image_tag\u0026gt; \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ \u0026lt;release_name\u0026gt; oudsm --reuse-values For example:\n$ helm upgrade --namespace oudsmns \\ --set image.repository=container-registry.oracle.com/middleware/oudsm_cpu,image.tag=12.2.1.4-jdk8-ol7-new \\ --set imagePullSecrets[0].name=\u0026#34;orclcred\u0026#34; \\ oudsm oudsm --reuse-values The following caveats exist:\n If you are not using Oracle Container Registry or your own container registry for your OUDSM container image, then you can remove the following: --set imagePullSecrets[0].name=\u0026quot;orclcred\u0026quot;. Verify the pods After updating with the new image the pod will restart. 
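Once it is back, an optional quick check (not part of the original steps) is to print the image the pod is now running with a jsonpath query, for example:\n$ kubectl --namespace oudsmns get pod oudsm-1 -o jsonpath=\u0026#39;{.spec.containers[0].image}\u0026#39; 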
Verify the pod is running:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pods For example:\n$ kubectl --namespace oudsmns get pods The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 73m 10.244.0.19 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; Note: It will take several minutes before the pod starts. While the oudsm pod has a READY status of 0/1, the pod is started but the OUDSM server associated with it is still starting. While the pod is starting you can check the startup status in the pod logs by running the following command:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; Verify the pod is using the new image by running the following command:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe pod oudsm-1 -n oudsmns The output will look similar to the following:\nName: oudsm-1 Namespace: oudsmns Priority: 0 Node: \u0026lt;worker-node\u0026gt;/100.102.48.28 Start Time: Wed, 23 Mar 2022 10:38:20 +0000 Labels: app.kubernetes.io/instance=oudsm app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oudsm app.kubernetes.io/version=12.2.1.4.0 helm.sh/chart=oudsm-0.1 oudsm/instance=oudsm-1 Annotations: meta.helm.sh/release-name: oudsm meta.helm.sh/release-namespace: oudsmns Status: Running IP: 10.244.1.90 etc... Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Killing 22m kubelet Container oudsm definition changed, will be restarted Normal Created 21m (x2 over 61m) kubelet Created container oudsm Normal Pulling 21m kubelet Container image \u0026#34;container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-new\u0026#34; Normal Started 21m (x2 over 61m) kubelet Started container oudsm " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/validate-sso-using-webgate/", + "title": "Validate a Basic SSO Flow using WebGate Registration ", + "tags": [], + "description": "Sample for validating a basic SSO flow using WebGate registration.", + "content": "In this section you validate that single sign-on works to the OAM Kubernetes cluster via Oracle WebGate. The instructions below assume you have a running Oracle HTTP Server (for example ohs_k8s) and Oracle WebGate installed on an independent server. The instructions also assume basic knowledge of how to register a WebGate agent.\nNote: At present Oracle HTTP Server and Oracle WebGate are not supported on a Kubernetes cluster.\nUpdate the OAM Hostname and Port for the Loadbalancer If using an NGINX ingress with no load balancer, change ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT} to ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} when referenced below.\n Launch a browser and access the OAM console (https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}/oamconsole). Login with the weblogic username and password (weblogic/\u0026lt;password\u0026gt;)\n Navigate to Configuration → Settings ( View ) → Access Manager.\n Under Load Balancing modify the OAM Server Host and OAM Server Port, to point to the Loadbalancer HTTP endpoint (e.g. loadbalancer.example.com and \u0026lt;port\u0026gt; respectively). In the OAM Server Protocol drop down list select https.\n Under WebGate Traffic Load Balancer modify the OAM Server Host and OAM Server Port, to point to the Loadbalancer HTTP endpoint (e.g. loadbalancer.example.com and \u0026lt;port\u0026gt; respectively). 
In the OAM Server Protocol drop down list select https.\n Click Apply.\n Register a WebGate Agent In all the examples below, change the directory path as appropriate for your installation.\n Run the following command on the server with Oracle HTTP Server and WebGate installed:\n$ cd \u0026lt;OHS_ORACLE_HOME\u0026gt;/webgate/ohs/tools/deployWebGate $ ./deployWebGateInstance.sh -w \u0026lt;OHS_DOMAIN_HOME\u0026gt;/config/fmwconfig/components/OHS/ohs_k8s -oh \u0026lt;OHS_ORACLE_HOME\u0026gt; -ws ohs The output will look similar to the following:\nCopying files from WebGate Oracle Home to WebGate Instancedir Run the following command to update the OHS configuration files appropriately:\n$ export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:\u0026lt;OHS_ORACLE_HOME\u0026gt;/lib $ cd \u0026lt;OHS_ORACLE_HOME\u0026gt;/webgate/ohs/tools/setup/InstallTools/ $ ./EditHttpConf -w \u0026lt;OHS_DOMAIN_HOME\u0026gt;/config/fmwconfig/components/OHS/ohs_k8s -oh \u0026lt;OHS_ORACLE_HOME\u0026gt; The output will look similar to the following:\nThe web server configuration file was successfully updated \u0026lt;OHS_DOMAIN_HOME\u0026gt;/config/fmwconfig/components/OHS/ohs_k8s/httpd.conf has been backed up as \u0026lt;OHS_DOMAIN_HOME\u0026gt;/config/fmwconfig/components/OHS/ohs_k8s/httpd.conf.ORIG Launch a browser, and access the OAM console. Navigate to Application Security → Quick Start Wizards → SSO Agent Registration. Register the agent in the usual way, download the configuration zip file and copy it to the OHS WebGate server, for example: \u0026lt;OHS_DOMAIN_HOME\u0026gt;/config/fmwconfig/components/OHS/ohs_k8s/webgate/config. Extract the zip file.\n Copy the Certificate Authority (CA) certificate (cacert.pem) for the load balancer/ingress certificate to the same directory e.g.: \u0026lt;OHS_DOMAIN_HOME\u0026gt;/config/fmwconfig/components/OHS/ohs_k8s/webgate/config.\nIf you used a self-signed certificate for the ingress, instead copy the self-signed certificate (e.g.: /scratch/ssl/tls.crt) to the above directory. Rename the certificate to cacert.pem.\n Restart Oracle HTTP Server.\n Access the configured OHS e.g. http://ohs.example.com:7778, and check you are redirected to the SSO login page. Login and make sure you are redirected successfully to the home page.\n Changing WebGate agent to use OAP Note: This section should only be followed if you need to change the OAM/WebGate Agent communication from HTTPS to OAP.\nTo change the WebGate agent to use OAP:\n In the OAM Console click Application Security and then Agents.\n Search for the agent you want to modify and select it.\n In the User Defined Parameters change:\na) OAMServerCommunicationMode from HTTPS to OAP. For example OAMServerCommunicationMode=OAP\nb) OAMRestEndPointHostName=\u0026lt;hostname\u0026gt; to the {$MASTERNODE-HOSTNAME}. 
For example OAMRestEndPointHostName=masternode.example.com\n In the Server Lists section click Add to add a new server with the following values:\n Access Server: Other Host Name: \u0026lt;{$MASTERNODE-HOSTNAME}\u0026gt; Host Port: \u0026lt;oamoap-service NodePort\u0026gt; Note: To find the value for Host Port run the following:\n$ kubectl describe svc accessdomain-oamoap-service -n oamns The output will look similar to the following:\nName: accessdomain-oamoap-service Namespace: oamns Labels: \u0026lt;none\u0026gt; Annotations: \u0026lt;none\u0026gt; Selector: weblogic.clusterName=oam_cluster Type: NodePort IP Families: \u0026lt;none\u0026gt; IP: 10.100.202.44 IPs: 10.100.202.44 Port: \u0026lt;unset\u0026gt; 5575/TCP TargetPort: 5575/TCP NodePort: \u0026lt;unset\u0026gt; 30540/TCP Endpoints: 10.244.5.21:5575,10.244.6.76:5575 Session Affinity: None External Traffic Policy: Cluster Events: \u0026lt;none\u0026gt; In the example above the NodePort is 30540.\n Delete all servers in Server Lists except for the one just created, and click Apply.\n Click Download to download the webgate zip file. Copy the zip file to the desired WebGate.\n Delete the cache from \u0026lt;OHS_DOMAIN_HOME\u0026gt;/servers/ohs1/cache and restart Oracle HTTP Server.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/manage-oam-domains/", + "title": "Manage OAM Domains", + "tags": [], + "description": "This document provides steps to manage the OAM domain.", + "content": "Important considerations for Oracle Access Management domains in Kubernetes.\n a. Domain Life Cycle Learn about the domain life cycle of an OAM domain.\n b. WLST Administration Operations Describes the steps for WLST administration using helper pod running in the same Kubernetes Cluster as OAM Domain.\n c. Logging and Visualization Describes the steps for logging and visualization with Elasticsearch and Kibana.\n d. Monitoring an OAM domain Describes the steps for Monitoring the OAM domain.\n e. 
Delete the OAM domain home Learn about the steps to clean up the OAM domain home.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/manage-oig-domains/", + "title": "Manage OIG domains", + "tags": [], + "description": "This document provides steps to manage the OIG domain.", + "content": "Important considerations for Oracle Identity Governance domains in Kubernetes.\n Domain life cycle Learn about the domain life cycle of an OIG domain.\n WLST administration operations Describes the steps for WLST administration using helper pod running in the same Kubernetes Cluster as OIG Domain.\n Running OIG utilities Describes the steps for running OIG utilities in Kubernetes.\n Logging and visualization Describes the steps for logging and visualization with Elasticsearch and Kibana.\n Monitoring an OIG domain Describes the steps for Monitoring the OIG domain and Publishing the logs to Elasticsearch.\n Delete the OIG domain home Learn about the steps to clean up the OIG domain home.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oid/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "How to Troubleshoot issues.", + "content": " Check the status of a namespace View pod logs View pod description Cleaning down a failed OID deployment Check the status of a namespace To check the status of objects in a namespace use the following command:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pod,service,secret,pv,pvc,ingress -o wide For example:\n$ kubectl --namespace oidns get pod,service,secret,pv,pvc,ingress -o wide Output will be similar to the following:\n NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oidhost1 1/1 Running 0 26m 10.244.1.150 \u0026lt;worker\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oidhost2 1/1 Running 0 26m 10.244.2.157 \u0026lt;worker\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oid-lbr-ldap ClusterIP 10.96.82.57 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 26m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid service/oidhost1 ClusterIP 10.111.67.10 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP,7001/TCP,7002/TCP 26m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid,oid/instance=oidhost1 service/oidhost2 ClusterIP 10.96.29.184 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 26m app.kubernetes.io/instance=oid,app.kubernetes.io/name=oid,oid/instance=oidhost2 NAME TYPE DATA AGE secret/default-token-5nrlh kubernetes.io/service-account-token 3 3d7h secret/oid-creds opaque 7 26m secret/oid-tls-cert kubernetes.io/tls 2 26m secret/oid-token-s95zt kubernetes.io/service-account-token 3 26m secret/orclcred kubernetes.io/dockerconfigjson 1 3d7h secret/sh.helm.release.v1.oid.v1 helm.sh/release.v1 1 26m NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/oid-pv 20Gi RWX Delete Bound oidns/oid-pvc manual 26m Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE persistentvolumeclaim/oid-pvc Bound oid-pv 20Gi RWX manual 26m Filesystem NAME CLASS HOSTS ADDRESS PORTS AGE ingress.networking.k8s.io/oid-ingress-nginx \u0026lt;none\u0026gt; * 80, 443 26m Include/exclude elements (pod,service,secret,pv,pvc,ingress) as required.\nView POD Logs To view logs for a pod use the following command:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl logs oidhost1 -n oidns Output will depend on the application running in the POD.\nView Pod Description Details 
about a pod can be viewed using the kubectl describe command:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe pod oidhost1 -n oidns Output will be similar to the following:\nName: oidhost1 Namespace: oidns Priority: 0 Node: \u0026lt;worker\u0026gt;/100.102.48.28 Start Time: Mon, 28 Mar 2022 16:19:54 +0000 Labels: app.kubernetes.io/instance=oid app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oid app.kubernetes.io/version=12.2.1.4.0 helm.sh/chart=oid-0.1 oid/instance=oidhost1 Annotations: meta.helm.sh/release-name: oid meta.helm.sh/release-namespace: oidns Status: Running IP: 10.244.1.150 IPs: IP: 10.244.1.150 Containers: oid: Container ID: cri-o://4172c7694d84c64c7c16e02fe96a8fc233b530ff5ae3b24d6440ff958c224e85 Image: container-registry.oracle.com/middleware/oid_cpu:12.2.1.4-jdk8-ol7-220223.1744 Image ID: container-registry.oracle.com/middleware/oid_cpu@sha256:ec1483590503837a3aa355dab1e33ab5237017b3924e12c2b1554c373d43a16b Ports: 3060/TCP, 3131/TCP, 7001/TCP, 7002/TCP Host Ports: 0/TCP, 0/TCP, 0/TCP, 0/TCP State: Running Started: Mon, 28 Mar 2022 16:19:55 +0000 Ready: True Restart Count: 0 Readiness: exec [/u01/oracle/dockertools/healthcheck_status.sh] delay=600s timeout=30s period=60s #success=1 #failure=15 Environment: INSTANCE_TYPE: PRIMARY sleepBeforeConfig: 180 INSTANCE_NAME: oid1 ADMIN_LISTEN_HOST: oidhost1 REALM_DN: dc=oid CONNECTION_STRING: oiddb.example.com:1521/oiddb.example.com LDAP_PORT: 3060 LDAPS_PORT: 3131 ADMIN_LISTEN_PORT: 7001 ADMIN_LISTEN_SSL_PORT: 7002 DOMAIN_NAME: oid_domain DOMAIN_HOME: /u01/oracle/user_projects/domains/oid_domain RCUPREFIX: OIDK8S7 ADMIN_USER: \u0026lt;set to the key 'adminUser' in secret 'oid-creds'\u0026gt; Optional: false ADMIN_PASSWORD: \u0026lt;set to the key 'adminPassword' in secret 'oid-creds'\u0026gt; Optional: false DB_USER: \u0026lt;set to the key 'dbUser' in secret 'oid-creds'\u0026gt; Optional: false DB_PASSWORD: \u0026lt;set to the key 'dbPassword' in secret 'oid-creds'\u0026gt; Optional: false DB_SCHEMA_PASSWORD: \u0026lt;set to the key 'dbschemaPassword' in secret 'oid-creds'\u0026gt; Optional: false ORCL_ADMIN_PASSWORD: \u0026lt;set to the key 'orcladminPassword' in secret 'oid-creds'\u0026gt; Optional: false SSL_WALLET_PASSWORD: \u0026lt;set to the key 'sslwalletPassword' in secret 'oid-creds'\u0026gt; Optional: false ldapPort: 3060 ldapsPort: 3131 httpPort: 7001 httpsPort: 7002 Mounts: /u01/oracle/user_projects from oid-pv (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-r26f6 (ro) Conditions: Type Status Initialized True Ready True ContainersReady True PodScheduled True Volumes: oid-pv: Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace) ClaimName: oid-pvc ReadOnly: false kube-api-access-r26f6: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt ConfigMapOptional: \u0026lt;nil\u0026gt; DownwardAPI: true QoS Class: BestEffort Node-Selectors: \u0026lt;none\u0026gt; Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Scheduled 29m default-scheduler Successfully assigned oidns/oidhost1 to \u0026lt;worker\u0026gt; Normal Pulled 29m kubelet Container image \u0026quot;container-registry.oracle.com/middleware/oid_cpu:12.2.1.4-jdk8-ol7-220223.1744\u0026quot; already present 
on machine Normal Created 29m kubelet Created container oid Normal Started 29m kubelet Started container oid " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oud/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "How to Troubleshoot issues.", + "content": " Check the status of a namespace View pod logs View pod description Check the status of a namespace To check the status of objects in a namespace use the following command:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get nodes,pod,service,secret,pv,pvc,ingress -o wide For example:\n$ kubectl --namespace oudns get pod,service,secret,pv,pvc,ingress -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 1 2d2h 10.244.2.129 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 1 2d2h 10.244.2.128 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 1/1 Running 1 2d2h 10.244.1.53 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oud-ds-rs-0 ClusterIP 10.111.120.232 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 2d2h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-1 ClusterIP 10.98.199.92 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 2d2h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-2 ClusterIP 10.103.22.27 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 2d2h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 service/oud-ds-rs-http-0 ClusterIP 10.100.75.60 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 2d2h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-http-1 ClusterIP 10.96.125.29 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 2d2h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-http-2 ClusterIP 10.98.147.195 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 2d2h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 service/oud-ds-rs-lbr-admin ClusterIP 10.105.146.21 \u0026lt;none\u0026gt; 1888/TCP,1444/TCP 2d2h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-http ClusterIP 10.101.185.178 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 2d2h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-ldap ClusterIP 10.111.134.94 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 2d2h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-ldap-0 ClusterIP 10.102.210.144 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 2d2h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-ldap-1 ClusterIP 10.98.75.22 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 2d2h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-ldap-2 ClusterIP 10.110.130.119 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 2d2h app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 NAME TYPE DATA AGE secret/default-token-n2pmp kubernetes.io/service-account-token 3 3d1h secret/orclcred kubernetes.io/dockerconfigjson 1 3d 
secret/oud-ds-rs-creds opaque 8 2d2h secret/oud-ds-rs-job-token-p4pz7 kubernetes.io/service-account-token 3 2d2h secret/oud-ds-rs-tls-cert kubernetes.io/tls 2 2d2h secret/oud-ds-rs-token-qzqt2 kubernetes.io/service-account-token 3 2d2h secret/sh.helm.release.v1.oud-ds-rs.v1 helm.sh/release.v1 1 2d2h secret/sh.helm.release.v1.oud-ds-rs.v2 helm.sh/release.v1 1 2d1h secret/sh.helm.release.v1.oud-ds-rs.v3 helm.sh/release.v1 1 2d1h secret/sh.helm.release.v1.oud-ds-rs.v4 helm.sh/release.v1 1 28h secret/sh.helm.release.v1.oud-ds-rs.v5 helm.sh/release.v1 1 25h secret/sh.helm.release.v1.oud-ds-rs.v6 helm.sh/release.v1 1 23h secret/sh.helm.release.v1.oud-ds-rs.v7 helm.sh/release.v1 1 23h NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/fmwk8s-jenkins-pv 1Gi RWO,RWX Delete Bound fmwk8s/fmwk8s-jenkins-pvc fmwk8s-jenkins-pv 35d Filesystem persistentvolume/fmwk8s-pv 1Gi RWO,RWX Delete Bound fmwk8s/fmwk8s-pvc fmwk8s-pv 35d Filesystem persistentvolume/fmwk8s-root-pv 1Gi RWO,RWX Delete Bound fmwk8s/fmwk8s-root-pvc fmwk8s-root-pv 35d Filesystem persistentvolume/oud-ds-rs-espv1 20Gi RWX Retain Bound oudns/data-oud-ds-rs-es-cluster-0 elk-oud 23h Filesystem persistentvolume/oud-ds-rs-job-pv 2Gi RWX Delete Bound oudns/oud-ds-rs-job-pvc manual 2d2h Filesystem persistentvolume/oud-ds-rs-pv 20Gi RWX Delete Bound oudns/oud-ds-rs-pvc manual 2d2h Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE persistentvolumeclaim/data-oud-ds-rs-es-cluster-0 Bound oud-ds-rs-espv1 20Gi RWX elk-oud 23h Filesystem persistentvolumeclaim/oud-ds-rs-job-pvc Bound oud-ds-rs-job-pv 2Gi RWX manual 2d2h Filesystem persistentvolumeclaim/oud-ds-rs-pvc Bound oud-ds-rs-pv 20Gi RWX manual 2d2h Filesystem NAME CLASS HOSTS ADDRESS PORTS AGE ingress.networking.k8s.io/oud-ds-rs-admin-ingress-nginx \u0026lt;none\u0026gt; oud-ds-rs-admin-0,oud-ds-rs-admin-1,oud-ds-rs-admin-2 + 3 more... 80, 443 2d2h ingress.networking.k8s.io/oud-ds-rs-http-ingress-nginx \u0026lt;none\u0026gt; oud-ds-rs-http-0,oud-ds-rs-http-1,oud-ds-rs-http-2 + 4 more... 
80, 443 2d2h Include/exclude elements (nodes,pod,service,secret,pv,pvc,ingress) as required.\nView pod logs To view logs for a pod use the following command:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl logs oud-ds-rs-0 -n oudns View pod description Details about a pod can be viewed using the kubectl describe command:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe pod oud-ds-rs-0 -n oudns The output will look similar to the following:\nName: oud-ds-rs-0 Namespace: oudns Priority: 0 Node: \u0026lt;Worker Node\u0026gt;/100.102.48.84 Start Time: Wed, 16 Mar 2022 14:39:09 +0000 Labels: app.kubernetes.io/instance=oud-ds-rs app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oud-ds-rs app.kubernetes.io/version=12.2.1.4.0 helm.sh/chart=oud-ds-rs-0.1 oud/instance=oud-ds-rs-0 Annotations: meta.helm.sh/release-name: oud-ds-rs meta.helm.sh/release-namespace: oudns Status: Running IP: 10.244.2.129 IPs: IP: 10.244.2.129 Containers: oud-ds-rs: Container ID: cri-o://2795176b6af2c17a9426df54214c7e53318db9676bbcf3676d67843174845d68 Image: container-registry.oracle.com/middleware/oud_cpu:12.2.1.4-jdk8-ol7-220119.2051 Image ID: container-registry.oracle.com/middleware/oud_cpu@sha256:6ba20e54d17bb41312618011481e9b35a40f36f419834d751277f2ce2f172dca Ports: 1444/TCP, 1888/TCP, 1389/TCP, 1636/TCP, 1080/TCP, 1081/TCP, 1898/TCP Host Ports: 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP, 0/TCP State: Running Started: Wed, 16 Mar 2022 15:38:10 +0000 Last State: Terminated Reason: Error Exit Code: 137 Started: Wed, 16 Mar 2022 14:39:10 +0000 Finished: Wed, 16 Mar 2022 15:37:16 +0000 Ready: True Restart Count: 1 Liveness: tcp-socket :ldap delay=900s timeout=15s period=30s #success=1 #failure=1 Readiness: exec [/u01/oracle/container-scripts/checkOUDInstance.sh] delay=180s timeout=30s period=60s #success=1 #failure=10 Environment: instanceType: Directory sleepBeforeConfig: 3 OUD_INSTANCE_NAME: oud-ds-rs-0 hostname: oud-ds-rs-0 baseDN: dc=example,dc=com rootUserDN: \u0026lt;set to the key 'rootUserDN' in secret 'oud-ds-rs-creds'\u0026gt; Optional: false rootUserPassword: \u0026lt;set to the key 'rootUserPassword' in secret 'oud-ds-rs-creds'\u0026gt; Optional: false adminConnectorPort: 1444 httpAdminConnectorPort: 1888 ldapPort: 1389 ldapsPort: 1636 httpPort: 1080 httpsPort: 1081 replicationPort: 1898 sampleData: 0 Mounts: /u01/oracle/user_projects from oud-ds-rs-pv (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-vr6v8 (ro) Conditions: Type Status Initialized True Ready True ContainersReady True PodScheduled True Volumes: oud-ds-rs-pv: Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace) ClaimName: oud-ds-rs-pvc ReadOnly: false kube-api-access-vr6v8: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt ConfigMapOptional: \u0026lt;nil\u0026gt; DownwardAPI: true QoS Class: BestEffort Node-Selectors: \u0026lt;none\u0026gt; Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: \u0026lt;none\u0026gt; " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oudsm/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "How to Troubleshoot issues.", + "content": " Check the status of a namespace View pod logs View pod description Check the status of a namespace To check the status of 
objects in a namespace use the following command:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get nodes,pod,service,secret,pv,pvc,ingress -o wide For example:\n$ kubectl --namespace oudsmns get nodes,pod,service,secret,pv,pvc,ingress -o wide The output will look similar to the following:\n$ kubectl --namespace oudsmns get pod,service,secret,pv,pvc,ingress -o wide NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 18m 10.244.1.89 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oudsm-1 ClusterIP 10.101.79.110 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 18m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1 service/oudsm-lbr ClusterIP 10.106.241.204 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 18m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm NAME TYPE DATA AGE secret/default-token-jtwn2 kubernetes.io/service-account-token 3 22h secret/orclcred kubernetes.io/dockerconfigjson 1 22h secret/oudsm-creds opaque 2 18m secret/oudsm-tls-cert kubernetes.io/tls 2 18m secret/oudsm-token-7kjff kubernetes.io/service-account-token 3 18m secret/sh.helm.release.v1.oudsm.v1 helm.sh/release.v1 1 18m NAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE VOLUMEMODE persistentvolume/oudsm-pv 20Gi RWX Delete Bound oudsmns/oudsm-pvc manual 18m Filesystem NAME STATUS VOLUME CAPACITY ACCESS MODES STORAGECLASS AGE VOLUMEMODE persistentvolumeclaim/oudsm-pvc Bound oudsm-pv 20Gi RWX manual 18m Filesystem NAME CLASS HOSTS ADDRESS PORTS AGE ingress.networking.k8s.io/oudsm-ingress-nginx \u0026lt;none\u0026gt; oudsm-1,oudsm 80, 443 18m Include/exclude elements (nodes,pod,service,secret,pv,pvc,ingress) as required.\nView pod logs To view logs for a pod use the following command:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl logs oudsm-1 -n oudsmns View pod description Details about a pod can be viewed using the kubectl describe command:\n$ kubectl describe pod \u0026lt;pod\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe pod oudsm-1 -n oudsmns The output will look similar to the following:\nName: oudsm-1 Namespace: oudsmns Priority: 0 Node: \u0026lt;worker-node\u0026gt;/100.102.48.28 Start Time: Tue, 22 Mar 2022 09:56:11 +0000 Labels: app.kubernetes.io/instance=oudsm app.kubernetes.io/managed-by=Helm app.kubernetes.io/name=oudsm app.kubernetes.io/version=12.2.1.4.0 helm.sh/chart=oudsm-0.1 oudsm/instance=oudsm-1 Annotations: meta.helm.sh/release-name: oudsm meta.helm.sh/release-namespace: oudsmns Status: Running IP: 10.244.1.89 IPs: IP: 10.244.1.89 Containers: oudsm: Container ID: cri-o://37dbe00257095adc0a424b8841db40b70bbb65645451e0bc53718a0fd7ce22e4 Image: container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-220223.2053 Image ID: container-registry.oracle.com/middleware/oudsm_cpu@sha256:47960d36d502d699bfd8f9b1be4c9216e302db95317c288f335f9c8a32974f2c Ports: 7001/TCP, 7002/TCP Host Ports: 0/TCP, 0/TCP State: Running Started: Tue, 22 Mar 2022 09:56:12 +0000 Ready: True Restart Count: 0 Liveness: http-get http://:7001/oudsm delay=1200s timeout=15s period=60s #success=1 #failure=3 Readiness: http-get http://:7001/oudsm delay=900s timeout=15s period=30s #success=1 #failure=3 Environment: DOMAIN_NAME: oudsmdomain-1 ADMIN_USER: \u0026lt;set to the key 'adminUser' in secret 'oudsm-creds'\u0026gt; Optional: false ADMIN_PASS: \u0026lt;set to 
the key 'adminPass' in secret 'oudsm-creds'\u0026gt; Optional: false ADMIN_PORT: 7001 ADMIN_SSL_PORT: 7002 WLS_PLUGIN_ENABLED: true Mounts: /u01/oracle/user_projects from oudsm-pv (rw) /var/run/secrets/kubernetes.io/serviceaccount from kube-api-access-9ht84 (ro) Conditions: Type Status Initialized True Ready True ContainersReady True PodScheduled True Volumes: oudsm-pv: Type: PersistentVolumeClaim (a reference to a PersistentVolumeClaim in the same namespace) ClaimName: oudsm-pvc ReadOnly: false kube-api-access-9ht84: Type: Projected (a volume that contains injected data from multiple sources) TokenExpirationSeconds: 3607 ConfigMapName: kube-root-ca.crt ConfigMapOptional: \u0026lt;nil\u0026gt; DownwardAPI: true QoS Class: BestEffort Node-Selectors: \u0026lt;none\u0026gt; Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s node.kubernetes.io/unreachable:NoExecute op=Exists for 300s Events: Type Reason Age From Message ---- ------ ---- ---- ------- Warning FailedScheduling 39m default-scheduler 0/3 nodes are available: 3 pod has unbound immediate PersistentVolumeClaims. Normal Scheduled 39m default-scheduler Successfully assigned oudsmns/oudsm-1 to \u0026lt;worker-node\u0026gt; Normal Pulled 39m kubelet Container image \u0026quot;container-registry.oracle.com/middleware/oudsm_cpu:12.2.1.4-jdk8-ol7-220223.2053\u0026quot; already present on machine Normal Created 39m kubelet Created container oudsm Normal Started 39m kubelet Started container oudsm " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/create-or-update-image/", + "title": "Create or update an image", + "tags": [], + "description": "Create or update an Oracle Access Management (OAM) container image used for deploying OAM domains.", + "content": "As described in Prepare Your Environment you can create your own OAM container image. If you have access to the My Oracle Support (MOS), and there is a need to build a new image with an interim or one off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Access Management image for production deployments.\nCreate or update an Oracle Access Management image using the WebLogic Image Tool Using the WebLogic Image Tool, you can create a new Oracle Access Management image with PSU\u0026rsquo;s and interim patches or update an existing image with one or more interim patches.\n Recommendations:\n Use create for creating a new Oracle Access Management image containing the Oracle Access Management binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OAM patches because it optimizes the size of the image. Use update for patching an existing Oracle Access Management image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. Create an image Set up the WebLogic Image Tool Prerequisites Set up the WebLogic Image Tool Validate setup WebLogic Image Tool build directory WebLogic Image Tool cache Prerequisites Verify that your environment meets the following prerequisites:\n Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. Bash version 4.0 or later, to enable the command complete feature. 
JAVA_HOME environment variable set to the appropriate JDK location e.g.: /scratch/export/oracle/product/jdk Set up the WebLogic Image Tool To set up the WebLogic Image Tool:\n Create a working directory and change to it:\n$ mkdir \u0026lt;workdir\u0026gt; $ cd \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/imagetool-setup $ cd /scratch/imagetool-setup Download the latest version of the WebLogic Image Tool from the releases page.\n$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip where X.X.X is the latest release referenced on the releases page.\n Unzip the release ZIP file in the imagetool-setup directory.\n$ unzip imagetool.zip Execute the following commands to set up the WebLogic Image Tool:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/bin $ source setup.sh For example:\n$ cd /scratch/imagetool-setup/imagetool/bin $ source setup.sh Validate setup To validate the setup of the WebLogic Image Tool:\n Enter the following command to retrieve the version of the WebLogic Image Tool:\n$ imagetool --version Enter imagetool then press the Tab key to display the available imagetool commands:\n$ imagetool \u0026lt;TAB\u0026gt; cache create help rebase update WebLogic Image Tool build directory The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user\u0026rsquo;s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:\n$ export WLSIMG_BLDDIR=\u0026#34;/path/to/build/dir\u0026#34; WebLogic Image Tool cache The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user\u0026rsquo;s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:\n$ export WLSIMG_CACHEDIR=\u0026#34;/path/to/cachedir\u0026#34; Set up additional build scripts Creating an Oracle Access Management container image using the WebLogic Image Tool requires additional container scripts for Oracle Access Management domains.\n Clone the docker-images repository to set up those scripts. 
In these steps, this directory is DOCKER_REPO:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup $ git clone https://github.com/oracle/docker-images.git For example:\n$ cd /scratch/imagetool-setup $ git clone https://github.com/oracle/docker-images.git Note: If you want to create the image, continue with the following steps. Otherwise, to update the image, see update an image.\n Create an image After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Access Management image.\nDownload the Oracle Access Management installation binaries and patches You must download the required Oracle Access Management installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.\nThe installation binaries and patches required are:\n Oracle Identity and Access Management 12.2.1.4.0\n fmw_12.2.1.4.0_idm.jar Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0\n fmw_12.2.1.4.0_infrastructure.jar OAM and FMW Infrastructure Patches:\n View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Access Management (OAM) table. For the latest PSU click the README link in the Documentation column. In the README, locate the \u0026ldquo;Installed Software\u0026rdquo; section. All the patch numbers to be downloaded are listed here. Download all these individual patches from My Oracle Support. Oracle JDK v8\n jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above. Update required build files The following files in the code repository location \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0 are used for creating the image:\n additionalBuildCmds.txt buildArgs Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%, %JDK_VERSION% and %BUILDTAG% appropriately.\nFor example:\ncreate --jdkVersion=8u301 --type oam --version=12.2.1.4.0 --tag=oam-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/install/iam.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/container-scripts Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file and under the GENERIC section add the line INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot;. For example:\n[GENERIC] INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot; DECLINE_SECURITY_UPDATES=true SECURITY_UPDATES_VIA_MYORACLESUPPORT=false Create the image Add a JDK package to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type jdk --version 8uXXX --path \u0026lt;download location\u0026gt;/jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version downloaded.\n Add the downloaded installation binaries to the WebLogic Image Tool cache. 
For example:\n$ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_infrastructure.jar $ imagetool cache addInstaller --type OAM --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_idm.jar Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;download location\u0026gt;/p28186730_139428_Generic.zip Add the rest of the downloaded product patches to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key \u0026lt;patch\u0026gt;_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p\u0026lt;patch\u0026gt;_122140_Generic.zip For example:\n$ imagetool cache addEntry --key 32971905_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32971905_122140_Generic.zip $ imagetool cache addEntry --key 20812896_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p20812896_122140_Generic.zip $ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32880070_122140_Generic.zip $ imagetool cache addEntry --key 33059296_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33059296_122140_Generic.zip $ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32905339_122140_Generic.zip $ imagetool cache addEntry --key 33084721_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33084721_122140_Generic.zip $ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p31544353_122140_Linux-x86-64.zip $ imagetool cache addEntry --key 32957281_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32957281_122140_Generic.zip $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33093748_122140_Generic.zip Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs file and append the product patches and opatch patch as follows:\n--patches 32971905_12.2.1.4.0,20812896_12.2.1.4.0,32880070_12.2.1.4.0,33059296_12.2.1.4.0,32905339_12.2.1.4.0,33084721_12.2.1.4.0,31544353_12.2.1.4.0,32957281_12.2.1.4.0,33093748_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.8 An example buildArgs file is now as follows:\ncreate --jdkVersion=8u301 --type oam --version=12.2.1.4.0 --tag=oam-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/install/iam.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleAccessManagement/dockerfiles/12.2.1.4.0/container-scripts --patches 32971905_12.2.1.4.0,20812896_12.2.1.4.0,32880070_12.2.1.4.0,33059296_12.2.1.4.0,32905339_12.2.1.4.0,33084721_12.2.1.4.0,31544353_12.2.1.4.0,32957281_12.2.1.4.0,33093748_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.8 Note: In the buildArgs file:\n --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk. --version value must match the --version value used in the imagetool cache addInstaller command for --type OAM. 
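For illustration (the 8u301 value below is only an assumed example, not a required version): if the JDK installer was added to the cache with\n$ imagetool cache addInstaller --type jdk --version 8u301 --path \u0026lt;download location\u0026gt;/jdk-8u301-linux-x64.tar.gz then the buildArgs file must specify --jdkVersion=8u301; if the two values differ, the create command cannot locate the JDK installer in the cache. 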
Refer to this page for the complete list of options available with the WebLogic Image Tool create command.\n Create the Oracle Access Management image:\n$ imagetool @\u0026lt;absolute path to buildargs file\u0026gt; --fromImage ghcr.io/oracle/oraclelinux:7-slim Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.\n For example:\n$ imagetool @\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleAccessManagement/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim Check the created image using the docker images command:\n$ docker images | grep oam The output will look similar to the following:\noam-latestpsu 12.2.1.4.0 ad732fc7c16b About a minute ago 3.35GB Run the following command to save the container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oam-latestpsu.tar oam-latestpsu:12.2.1.4.0 Update an image The steps below show how to update an existing Oracle Access Management image with an interim patch.\nThe container image to be patched must be loaded in the local docker images repository before attempting these steps.\nIn the examples below the image oracle/oam:12.2.1.4.0 is updated with an interim patch.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE oracle/oam 12.2.1.4.0 b051804ba15f 3 months ago 3.34GB Set up the WebLogic Image Tool.\n Download the required interim patch and latest OPatch (28186730) from My Oracle Support and save them in a directory of your choice.\n Add the OPatch patch to the WebLogic Image Tool cache, for example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;downloaded-patches-location\u0026gt;/p28186730_139428_Generic.zip Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p32701831_12214210607_Generic.zip:\n$ imagetool cache addEntry --key=32701831_12.2.1.4.210607 --value \u0026lt;downloaded-patches-location\u0026gt;/p32701831_12214210607_Generic.zip Provide the following arguments to the WebLogic Image Tool update command:\n --fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oam:12.2.1.4.0. --patches - Multiple patches can be specified as a comma-separated list. --tag - Specify the new tag to be applied for the image being built. Refer here for the complete list of options available with the WebLogic Image Tool update command.\n Note: The WebLogic Image Tool cache should have the latest OPatch zip. 
The WebLogic Image Tool will update the OPatch if it is not already updated in the image.\n For example:\n$ imagetool update --fromImage oracle/oam:12.2.1.4.0 --tag=oracle/oam-new:12.2.1.4.0 --patches=32701831_12.2.1.4.210607 --opatchBugNumber=28186730_13.9.4.2.8 Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown \u0026lt;userid\u0026gt;:\u0026lt;groupid\u0026gt; to correspond with the values returned in the error.\n Check the built image using the docker images command:\n$ docker images | grep oam The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oam-new 12.2.1.4.0 78ccd1ad67eb 5 minutes ago 3.8GB oracle/oam 12.2.1.4.0 b051804ba15f 3 months ago 3.34GB Run the following command to save the patched container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oam-new.tar oracle/oam-new:12.2.1.4.0 " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/create-or-update-image/", + "title": "Create or update an image", + "tags": [], + "description": "Create or update an Oracle Identity Governance (OIG) container image used for deploying OIG domains", + "content": "As described in Prepare Your Environment you can create your own OIG container image. If you have access to the My Oracle Support (MOS), and there is a need to build a new image with an interim or one off patch, it is recommended to use the WebLogic Image Tool to build an Oracle Identity Governance image for production deployments.\nCreate or update an Oracle Identity Governance image using the WebLogic Image Tool Using the WebLogic Image Tool, you can create a new Oracle Identity Governance image with PSU\u0026rsquo;s and interim patches or update an existing image with one or more interim patches.\n Recommendations:\n Use create for creating a new Oracle Identity Governance image containing the Oracle Identity Governance binaries, bundle patch and interim patches. This is the recommended approach if you have access to the OIG patches because it optimizes the size of the image. Use update for patching an existing Oracle Identity Governance image with a single interim patch. Note that the patched image size may increase considerably due to additional image layers introduced by the patch application tool. Create an image Set up the WebLogic Image Tool Prerequisites Set up the WebLogic Image Tool Validate setup WebLogic Image Tool build directory WebLogic Image Tool cache Prerequisites Verify that your environment meets the following prerequisites:\n Docker client and daemon on the build machine, with minimum Docker version 18.03.1.ce. Bash version 4.0 or later, to enable the command complete feature. 
JAVA_HOME environment variable set to the appropriate JDK location e.g.: /scratch/export/oracle/product/jdk Set up the WebLogic Image Tool To set up the WebLogic Image Tool:\n Create a working directory and change to it:\n$ mkdir \u0026lt;workdir\u0026gt; $ cd \u0026lt;workdir\u0026gt; For example:\n$ mkdir /scratch/imagetool-setup $ cd /scratch/imagetool-setup Download the latest version of the WebLogic Image Tool from the releases page.\n$ wget https://github.com/oracle/weblogic-image-tool/releases/download/release-X.X.X/imagetool.zip where X.X.X is the latest release referenced on the releases page.\n Unzip the release ZIP file in the imagetool-setup directory.\n$ unzip imagetool.zip Execute the following commands to set up the WebLogic Image Tool:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup/imagetool/bin $ source setup.sh For example:\n$ cd /scratch/imagetool-setup/imagetool/bin $ source setup.sh Validate setup To validate the setup of the WebLogic Image Tool:\n Enter the following command to retrieve the version of the WebLogic Image Tool:\n$ imagetool --version Enter imagetool then press the Tab key to display the available imagetool commands:\n$ imagetool \u0026lt;TAB\u0026gt; cache create help rebase update WebLogic Image Tool build directory The WebLogic Image Tool creates a temporary Docker context directory, prefixed by wlsimgbuilder_temp, every time the tool runs. Under normal circumstances, this context directory will be deleted. However, if the process is aborted or the tool is unable to remove the directory, it is safe for you to delete it manually. By default, the WebLogic Image Tool creates the Docker context directory under the user\u0026rsquo;s home directory. If you prefer to use a different directory for the temporary context, set the environment variable WLSIMG_BLDDIR:\n$ export WLSIMG_BLDDIR=\u0026#34;/path/to/build/dir\u0026#34; WebLogic Image Tool cache The WebLogic Image Tool maintains a local file cache store. This store is used to look up where the Java, WebLogic Server installers, and WebLogic Server patches reside in the local file system. By default, the cache store is located in the user\u0026rsquo;s $HOME/cache directory. Under this directory, the lookup information is stored in the .metadata file. All automatically downloaded patches also reside in this directory. You can change the default cache store location by setting the environment variable WLSIMG_CACHEDIR:\n$ export WLSIMG_CACHEDIR=\u0026#34;/path/to/cachedir\u0026#34; Set up additional build scripts Creating an Oracle Identity Governance container image using the WebLogic Image Tool requires additional container scripts for Oracle Identity Governance domains.\n Clone the docker-images repository to set up those scripts. 
In these steps, this directory is DOCKER_REPO:\n$ cd \u0026lt;workdir\u0026gt;/imagetool-setup $ git clone https://github.com/oracle/docker-images.git For example:\n$ cd /scratch/imagetool-setup $ git clone https://github.com/oracle/docker-images.git Note: If you want to create the image, continue with the following steps. Otherwise, to update the image, see update an image.\n Create an image After setting up the WebLogic Image Tool, follow these steps to use the WebLogic Image Tool to create a new Oracle Identity Governance image.\nDownload the Oracle Identity Governance installation binaries and patches You must download the required Oracle Identity Governance installation binaries and patches as listed below from the Oracle Software Delivery Cloud and save them in a directory of your choice.\nThe installation binaries and patches required are:\n Oracle Identity and Access Management 12.2.1.4.0\n fmw_12.2.1.4.0_idm.jar Oracle Fusion Middleware 12c Infrastructure 12.2.1.4.0\n fmw_12.2.1.4.0_infrastructure.jar Oracle SOA Suite for Oracle Middleware 12.2.1.4.0\n fmw_12.2.1.4.0_soa.jar Oracle Service Bus 12.2.1.4.0\n fmw_12.2.1.4.0_osb.jar OIG and FMW Infrastructure Patches:\n View document ID 2723908.1 on My Oracle Support. In the Container Image Download/Patch Details section, locate the Oracle Identity Governance (OIG) table. For the latest PSU click the README link in the Documentation column. In the README, locate the \u0026ldquo;Installed Software\u0026rdquo; section. All the patch numbers to be downloaded are listed here. Download all these individual patches from My Oracle Support. Oracle JDK v8\n jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version referenced in the README above. Update required build files The following files in the code repository location \u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0 are used for creating the image:\n additionalBuildCmds.txt buildArgs Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs file and change %DOCKER_REPO%, %JDK_VERSION% and %BUILDTAG% appropriately.\nFor example:\ncreate --jdkVersion=8u311 --type oig --chown oracle:root --version=12.2.1.4.0 --tag=oig-latestpsu:12.2.1.4.0 --pull --installerResponseFile /scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/soasuite.response,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/osb.response,/scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/idmqs.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/container-scripts Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file and under the GENERIC section add the line INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot;. For example:\n[GENERIC] INSTALL_TYPE=\u0026quot;Fusion Middleware Infrastructure\u0026quot; DECLINE_SECURITY_UPDATES=true SECURITY_UPDATES_VIA_MYORACLESUPPORT=false Create the image Add a JDK package to the WebLogic Image Tool cache. 
For example:\n$ imagetool cache addInstaller --type jdk --version 8uXXX --path \u0026lt;download location\u0026gt;/jdk-8uXXX-linux-x64.tar.gz where XXX is the JDK version downloaded\n Add the downloaded installation binaries to the WebLogic Image Tool cache. For example:\n$ imagetool cache addInstaller --type fmw --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_infrastructure.jar $ imagetool cache addInstaller --type soa --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_soa.jar $ imagetool cache addInstaller --type osb --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_osb.jar $ imagetool cache addInstaller --type idm --version 12.2.1.4.0 --path \u0026lt;download location\u0026gt;/fmw_12.2.1.4.0_idm.jar Add the downloaded OPatch patch to the WebLogic Image Tool cache. For example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;download location\u0026gt;/p28186730_139428_Generic.zip Add the rest of the downloaded product patches to the WebLogic Image Tool cache:\n$ imagetool cache addEntry --key \u0026lt;patch\u0026gt;_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p\u0026lt;patch\u0026gt;_122140_Generic.zip For example:\n$ imagetool cache addEntry --key 33416868_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33416868_122140_Generic.zip $ imagetool cache addEntry --key 33453703_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33453703_122140_Generic.zip $ imagetool cache addEntry --key 32999272_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32999272_122140_Generic.zip $ imagetool cache addEntry --key 33093748_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33093748_122140_Generic.zip $ imagetool cache addEntry --key 33281560_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33281560_122140_Generic.zip $ imagetool cache addEntry --key 31544353_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p31544353_122140_Linux-x86-64.zip $ imagetool cache addEntry --key 33313802_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33313802_122140_Generic.zip $ imagetool cache addEntry --key 33408307_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33408307_122140_Generic.zip $ imagetool cache addEntry --key 33286160_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p33286160_122140_Generic.zip $ imagetool cache addEntry --key 32880070_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32880070_122140_Generic.zip $ imagetool cache addEntry --key 32905339_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32905339_122140_Generic.zip $ imagetool cache addEntry --key 32784652_12.2.1.4.0 --value \u0026lt;download location\u0026gt;/p32784652_122140_Generic.zip Edit the \u0026lt;workdir\u0026gt;/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs file and append the product patches and opatch patch as follows:\n--patches 33416868_12.2.1.4.0,33453703_12.2.1.4.0,32999272_12.2.1.4.0,33093748_12.2.1.4.0,33281560_12.2.1.4.0,31544353_12.2.1.4.0,33313802_12.2.1.4.0,33408307_12.2.1.4.0,33286160_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32784652_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.8 An example buildArgs file is now as follows:\ncreate --jdkVersion=8u301 --type oig --version=12.2.1.4.0 --tag=oig-latestpsu:12.2.1.4.0 --pull --installerResponseFile 
/scratch/imagetool-setup/docker-images/OracleFMWInfrastructure/dockerfiles/12.2.1.4/install.file,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/soasuite.response,/scratch/imagetool-setup/docker-images/OracleSOASuite/dockerfiles/12.2.1.4.0/install/osb.response,/scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/idmqs.response --additionalBuildCommands /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/additionalBuildCmds.txt --additionalBuildFiles /scratch/imagetool-setup/docker-images/OracleIdentityGovernance/dockerfiles/12.2.1.4.0/container-scripts --patches 33416868_12.2.1.4.0,33453703_12.2.1.4.0,32999272_12.2.1.4.0,33093748_12.2.1.4.0,33281560_12.2.1.4.0,31544353_12.2.1.4.0,33313802_12.2.1.4.0,33408307_12.2.1.4.0,33286160_12.2.1.4.0,32880070_12.2.1.4.0,32905339_12.2.1.4.0,32784652_12.2.1.4.0 --opatchBugNumber=28186730_13.9.4.2.8 Note: In the buildArgs file:\n --jdkVersion value must match the --version value used in the imagetool cache addInstaller command for --type jdk. --version value must match the --version value used in the imagetool cache addInstaller command for --type idm. Refer to this page for the complete list of options available with the WebLogic Image Tool create command.\n Create the Oracle Identity Governance image:\n$ imagetool @\u0026lt;absolute path to buildargs file\u0026gt; --fromImage ghcr.io/oracle/oraclelinux:7-slim Note: Make sure that the absolute path to the buildargs file is prepended with a @ character, as shown in the example above.\n For example:\n$ imagetool @\u0026lt;imagetool-setup-location\u0026gt;/docker-images/OracleIdentityGovernance/imagetool/12.2.1.4.0/buildArgs --fromImage ghcr.io/oracle/oraclelinux:7-slim Check the created image using the docker images command:\n$ docker images | grep oig The output will look similar to the following:\noig-latestpsu 12.2.1.4.0 e391ed154bcb 50 seconds ago 4.43GB Run the following command to save the container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oig-latestpsu.tar oig-latestpsu:12.2.1.4.0 Update an image The steps below show how to update an existing Oracle Identity Governance image with an interim patch.\nThe container image to be patched must be loaded in the local docker images repository before attempting these steps.\nIn the examples below the image oracle/oig:12.2.1.4.0 is updated with an interim patch.\n$ docker images REPOSITORY TAG IMAGE ID CREATED SIZE oracle/oig 12.2.1.4.0 298fdb98e79c 3 months ago 4.42GB Set up the WebLogic Image Tool.\n Download the required interim patch and latest OPatch (28186730) from My Oracle Support and save them in a directory of your choice.\n Add the OPatch patch to the WebLogic Image Tool cache, for example:\n$ imagetool cache addEntry --key 28186730_13.9.4.2.8 --value \u0026lt;downloaded-patches-location\u0026gt;/p28186730_139428_Generic.zip Execute the imagetool cache addEntry command for each patch to add the required patch(es) to the WebLogic Image Tool cache. For example, to add patch p33165837_12214210708_Generic.zip:\n$ imagetool cache addEntry --key=33165837_12.2.1.4.210708 --value \u0026lt;downloaded-patches-location\u0026gt;/p33165837_12214210708_Generic.zip Provide the following arguments to the WebLogic Image Tool update command:\n --fromImage - Identify the image that needs to be updated. In the example below, the image to be updated is oracle/oig:12.2.1.4.0. 
--patches - Multiple patches can be specified as a comma-separated list. --tag - Specify the new tag to be applied for the image being built. Refer here for the complete list of options available with the WebLogic Image Tool update command.\n Note: The WebLogic Image Tool cache should have the latest OPatch zip. The WebLogic Image Tool will update the OPatch if it is not already updated in the image.\n For example:\n$ imagetool update --fromImage oracle/oig:12.2.1.4.0 --tag=oracle/oig-new:12.2.1.4.0 --patches=33165837_12.2.1.4.210708 --opatchBugNumber=28186730_13.9.4.2.8 Note: If the command fails because the files in the image being upgraded are not owned by oracle:oracle, then add the parameter --chown \u0026lt;userid\u0026gt;:\u0026lt;groupid\u0026gt; to correspond with the values returned in the error.\n Check the built image using the docker images command:\n$ docker images | grep oig The output will look similar to the following:\nREPOSITORY TAG IMAGE ID CREATED SIZE oracle/oig-new 12.2.1.4.0 0c8381922e95 16 seconds ago 4.91GB oracle/oig 12.2.1.4.0 298fdb98e79c 3 months ago 4.42GB Run the following command to save the patched container image to a tar file:\n$ docker save -o \u0026lt;path\u0026gt;/\u0026lt;file\u0026gt;.tar \u0026lt;image\u0026gt; For example:\n$ docker save -o $WORKDIR/oig-new.tar oracle/oig-new:12.2.1.4.0 " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/patch-and-upgrade/", + "title": "Patch and Upgrade", + "tags": [], + "description": "This document provides steps to patch or upgrade an OAM image, WebLogic Kubernetes Operator or Kubernetes Cluster.", + "content": "Patch an existing OAM image, or upgrade the WebLogic Kubernetes Operator release.\n a. Patch an image Instructions on how to update your OAM Kubernetes cluster with a new OAM container image.\n b. Upgrade an operator release Instructions on how to update the WebLogic Kubernetes Operator version.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/patch-and-upgrade/", + "title": "Patch and upgrade", + "tags": [], + "description": "This document provides steps to patch or upgrade an OIG image, or WebLogic Kubernetes Operator.", + "content": "Patch an existing Oracle OIG image, or upgrade the WebLogic Kubernetes Operator release.\n a. Patch an image Instructions on how to update your OIG Kubernetes cluster with a new OIG container image.\n b. Upgrade an operator release Instructions on how to update the WebLogic Kubernetes Operator version.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "How to Troubleshoot domain creation failure.", + "content": "Domain creation failure If the OAM domain creation fails when running create-domain.sh, run the following to diagnose the issue:\n Run the following command to diagnose the create domain job:\n$ kubectl logs \u0026lt;domain_job\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl logs accessdomain-create-fmw-infra-sample-domain-job-c6vfb -n oamns Also run:\n$ kubectl describe pod \u0026lt;domain_job\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe pod accessdomain-create-fmw-infra-sample-domain-job-c6vfb -n oamns Using the output you should be able to diagnose the problem and resolve the issue.\nClean down the failed domain creation by following steps 1-3 in Delete the OAM domain home. Then follow RCU schema creation onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. 
Then execute the OAM domain creation steps again.\n If any of the above commands return the following error:\nFailed to start container \u0026quot;create-fmw-infra-sample-domain-job\u0026quot;: Error response from daemon: error while creating mount source path '/scratch/shared/accessdomainpv ': mkdir /scratch/shared/accessdomainpv : permission denied then there is a permissions error on the directory for the PV and PVC and the following should be checked:\na) The directory has 777 permissions: chmod -R 777 \u0026lt;persistent_volume\u0026gt;/accessdomainpv.\nb) If it does have the permissions, check if an oracle user exists and the uid is 1000 and gid is 0.\nCreate the oracle user if it doesn\u0026rsquo;t exist and set the uid to 1000 and gid to 0.\nc) Edit the $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml and add a slash to the end of the directory for the weblogicDomainStoragePath parameter:\nweblogicDomainStoragePath: /scratch/shared/accessdomainpv/ Clean down the failed domain creation by following steps 1-3 in Delete the OAM domain home. Then follow RCU schema creation onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the OAM domain creation steps again.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/troubleshooting/", + "title": "Troubleshooting", + "tags": [], + "description": "How to Troubleshoot domain creation failure.", + "content": "Domain creation failure If the OIG domain creation fails when running create-domain.sh, run the following to diagnose the issue:\n Run the following command to diagnose the create domain job:\n$ kubectl logs \u0026lt;job_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl logs governancedomain-create-fmw-infra-sample-domain-job-9wqzb -n oigns Also run:\n$ kubectl describe pod \u0026lt;job_name\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl describe pod governancedomain-create-fmw-infra-sample-domain-job-9wqzb -n oigns Using the output you should be able to diagnose the problem and resolve the issue.\nClean down the failed domain creation by following steps 1-3 in Delete the OIG domain home. Then follow RCU schema creation onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. 
Then execute the OIG domain creation steps again.\n If any of the above commands return the following error:\nFailed to start container \u0026quot;create-fmw-infra-sample-domain-job\u0026quot;: Error response from daemon: error while creating mount source path '/scratch/shared/governancedomainpv ': mkdir /scratch/shared/governancedomainpv : permission denied then there is a permissions error on the directory for the PV and PVC and the following should be checked:\na) The directory has 777 permissions: chmod -R 777 \u0026lt;persistent_volume\u0026gt;/governancedomainpv.\nb) If it does have the permissions, check if an oracle user exists and the uid and gid equal 1000, for example:\n$ uid=1000(oracle) gid=1000(spg) groups=1000(spg),59968(oinstall),8500(dba),100(users),1007(cgbudba) Create the oracle user if it doesn\u0026rsquo;t exist and set the uid and gid to 1000.\nc) Edit the $WORKDIR/kubernetes/create-weblogic-domain-pv-pvc/create-pv-pvc-inputs.yaml and add a slash to the end of the directory for the weblogicDomainStoragePath parameter:\nweblogicDomainStoragePath: /scratch/shared/governancedomainpv/ Clean down the failed domain creation by following steps 1-3 in Delete the OIG domain home. Then follow RCU schema creation onwards to recreate the RCU schema, kubernetes secrets for domain and RCU, the persistent volume and the persistent volume claim. Then execute the OIG domain creation steps again.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oid/manage-oid-containers/scaling-up-down/", + "title": "a) Scaling Up/Down OID Pods ", + "tags": [], + "description": "Describes the steps for scaling up/down for OID pods.", + "content": "Introduction This section describes how to increase or decrease the number of OID pods in the Kubernetes deployment.\nView existing OID pods By default the oid helm chart deployment starts two pods: oidhost1 and oidhost2.\nThe number of pods started is determined by the replicaCount, which is set to 1 by default. 
A value of 1 starts the two pods above.\nTo scale up or down the number of OID pods, set replicaCount accordingly.\nRun the following command to view the number of pods in the OID deployment:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pods -o wide For example:\n$ kubectl --namespace oidns get pods -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oidhost1 1/1 Running 0 34m 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oidhost2 1/1 Running 0 34m 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; Scaling up OID pods In this example, replicaCount is increased to 2 which creates a new OID pod oidhost3 with associated services created.\nYou can scale up the number of OID pods using one of the following methods:\n Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create an oid-scaleup-override.yaml file that contains:\nreplicaCount: 2 Run the following command to scale up the OID pods:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --values oid-scaleup-override.yaml \\ \u0026lt;release_name\u0026gt; oid --reuse-values For example:\n$ helm upgrade --namespace oidns \\ --values oid-scaleup-override.yaml \\ oid oid --reuse-values Using --set argument Run the following command to scale up the OID pods:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --set replicaCount=2 \\ \u0026lt;release_name\u0026gt; oid --reuse-values For example:\n$ helm upgrade --namespace oidns \\ --set replicaCount=2 \\ oid oid --reuse-values Verify the pods Verify the new OID pod oidhost3 has started:\n$ kubectl get pod,service -o wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods,service -n oidns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oidhost1 1/1 Running 0 45m 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oidhost2 1/1 Running 0 45m 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oidhost3 1/1 Running 0 17m 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/oid-lbr-ldap ClusterIP 10.110.118.113 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 45m service/oidhost1 ClusterIP 10.97.17.125 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP,7001/TCP,7002/TCP 45m service/oidhost2 ClusterIP 10.106.32.187 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 45m service/oidhost3 ClusterIP 10.105.33.184 \u0026lt;none\u0026gt; 3060/TCP,3131/TCP 17m Note: It will take several minutes before all the services listed above show. While the oidhost3 pod has a READY status of 0/1 the pod is started but the OID server associated with it is currently starting. While the pod is starting you can check the startup status in the pod log, by running the following command:\n$ kubectl logs oidhost3 -n oidns Scaling down OID pods Scaling down OID pods is performed in exactly the same way as in Scaling up OID pods except the replicaCount is reduced to the required number of pods.\nOnce the helm command is executed the pod(s) will move to a Terminating state. 
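For example, to scale back down to the default of one replica, reusing the release name (oid) and namespace (oidns) from the scale up examples above (a YAML override file can be used in the same way):\n$ helm upgrade --namespace oidns \\ --set replicaCount=1 \\ oid oid --reuse-values 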
In the example below replicaCount was reduced from 2 to 1 and hence oidhost3 has moved to Terminating:\n$ kubectl get pods -n oidns NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oidhost1 1/1 Running 0 49m 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oidhost2 1/1 Running 0 49m 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oidhost3 1/1 Terminating 0 21m 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; The pod will take a minute or two to stop and then will disappear:\n$ kubectl get pods -n oidns NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oidhost1 1/1 Running 0 51m 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oidhost2 1/1 Running 0 51m 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oud/manage-oud-containers/scaling-up-down/", + "title": "a) Scaling Up/Down OUD Pods ", + "tags": [], + "description": "Describes the steps for scaling up/down for OUD pods.", + "content": "Introduction This section describes how to increase or decrease the number of OUD pods in the Kubernetes deployment.\nView existing OUD pods By default the oud-ds-rs helm chart deployment starts three pods: oud-ds-rs-0 and two replica pods oud-ds-rs-1 and oud-ds-rs-2.\nThe number of pods started is determined by the replicaCount, which is set to 2 by default. A value of 2 starts the three pods above.\nTo scale up or down the number of OUD pods, set replicaCount accordingly.\nRun the following command to view the number of pods in the OUD deployment:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pods -o wide For example:\n$ kubectl --namespace oudns get pods -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 34m 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 34m 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 1/1 Running 0 34m 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; Scaling up OUD pods In this example, replicaCount is increased to 3 which creates a new OUD pod oud-ds-rs-3 with associated services created.\nYou can scale up the number of OUD pods using one of the following methods:\n Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create an oud-scaleup-override.yaml file that contains:\nreplicaCount: 3 Run the following command to scale up the OUD pods:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --values oud-scaleup-override.yaml \\ \u0026lt;release_name\u0026gt; oud-ds-rs --reuse-values For example:\n$ helm upgrade --namespace oudns \\ --values oud-scaleup-override.yaml \\ oud-ds-rs oud-ds-rs --reuse-values Using --set argument Run the following command to scale up the OUD pods:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --set replicaCount=3 \\ \u0026lt;release_name\u0026gt; oud-ds-rs --reuse-values For example:\n$ helm upgrade --namespace oudns \\ --set replicaCount=3 \\ oud-ds-rs oud-ds-rs --reuse-values Verify the pods Verify the new OUD pod oud-ds-rs-3 has started:\n$ kubectl get pod,service -o
wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods,service -n oudns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 45m 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 45m 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 1/1 Running 0 45m 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-3 1/1 Running 0 17m 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oud-ds-rs-0 ClusterIP 10.99.232.83 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 37m44s kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-1 ClusterIP 10.100.186.42 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 37m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-2 ClusterIP 10.104.55.53 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 37m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 service/oud-ds-rs-3 ClusterIP 10.104.45.52 \u0026lt;none\u0026gt; 1444/TCP,1888/TCP,1898/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-3 service/oud-ds-rs-http-0 ClusterIP 10.102.116.145 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 37m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-http-1 ClusterIP 10.111.103.84 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 37m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-http-2 ClusterIP 10.105.53.24 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 37m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 service/oud-ds-rs-http-3 ClusterIP 10.106.51.25 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 8m45s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-3 service/oud-ds-rs-lbr-admin ClusterIP 10.98.39.206 \u0026lt;none\u0026gt; 1888/TCP,1444/TCP 37m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-http ClusterIP 10.110.77.132 \u0026lt;none\u0026gt; 1080/TCP,1081/TCP 37m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-lbr-ldap ClusterIP 10.111.55.122 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 37m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs service/oud-ds-rs-ldap-0 ClusterIP 10.108.155.81 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 37m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-0 service/oud-ds-rs-ldap-1 ClusterIP 10.104.88.44 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 37m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-1 service/oud-ds-rs-ldap-2 ClusterIP 10.105.253.120 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 37m44s app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-2 service/oud-ds-rs-ldap-3 ClusterIP 10.105.253.55 \u0026lt;none\u0026gt; 1389/TCP,1636/TCP 8m45s 
app.kubernetes.io/instance=oud-ds-rs,app.kubernetes.io/name=oud-ds-rs,oud/instance=oud-ds-rs-3 Note: It will take several minutes before all the services listed above show. While the oud-ds-rs-3 pod has a READY status of 0/1 the pod is started but the OUD server associated with it is currently starting. While the pod is starting you can check the startup status in the pod log, by running the following command:\n$ kubectl logs oud-ds-rs-3 -n oudns Scaling down OUD pods Scaling down OUD pods is performed in exactly the same way as in Scaling up OUD pods except the replicaCount is reduced to the required number of pods.\nOnce the helm command is executed the pod(s) will move to a Terminating state. In the example below replicaCount was reduced from 3 to 2 and hence oud-ds-rs-3 has moved to Terminating:\n$ kubectl get pods -n oudns NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 49m 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 49m 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 1/1 Running 0 49m 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-3 1/1 Terminating 0 21m 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; The pod will take a minute or two to stop and then will disappear:\n$ kubectl get pods -n oudns NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oud-ds-rs-0 1/1 Running 0 51m 10.244.0.195 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-1 1/1 Running 0 51m 10.244.0.194 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oud-ds-rs-2 1/1 Running 0 51m 10.244.0.193 \u0026lt;Worker Node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oudsm/manage-oudsm-containers/scaling-up-down/", + "title": "a) Scaling Up/Down OUDSM Pods ", + "tags": [], + "description": "Describes the steps for scaling up/down for OUDSM pods.", + "content": "Introduction This section describes how to increase or decrease the number of OUDSM pods in the Kubernetes deployment.\nView existing OUDSM pods By default the oudsm helm chart deployment starts one pod: oudsm-1.\nThe number of pods started is determined by the replicaCount, which is set to 1 by default. 
A value of 1 starts the pod above.\nTo scale up or down the number of OUDSM pods, set replicaCount accordingly.\nRun the following command to view the number of pods in the OUDSM deployment:\n$ kubectl --namespace \u0026lt;namespace\u0026gt; get pods -o wide For example:\n$ kubectl --namespace oudsmns get pods -o wide The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 73m 10.244.0.19 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; Scaling up OUDSM pods In this example, replicaCount is increased to 2 which creates a new OUDSM pod oudsm-2 with associated services created.\nYou can scale up the number of OUDSM pods using one of the following methods:\n Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory:\n$ cd $WORKDIR/kubernetes/helm Create an oudsm-scaleup-override.yaml file that contains:\nreplicaCount: 2 Run the following command to scale up the OUDSM pods:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --values oudsm-scaleup-override.yaml \\ \u0026lt;release_name\u0026gt; oudsm --reuse-values For example:\n$ helm upgrade --namespace oudsmns \\ --values oudsm-scaleup-override.yaml \\ oudsm oudsm --reuse-values Using --set argument Run the following command to scale up the OUDSM pods:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; \\ --set replicaCount=2 \\ \u0026lt;release_name\u0026gt; oudsm --reuse-values For example:\n$ helm upgrade --namespace oudsmns \\ --set replicaCount=2 \\ oudsm oudsm --reuse-values Verify the pods Verify the new OUDSM pod oudsm-2 has started:\n$ kubectl get pod,service -o wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods,service -n oudsmns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 88m 10.244.0.19 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oudsm-2 1/1 Running 0 15m 10.245.3.45 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/oudsm-1 ClusterIP 10.96.108.200 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 88m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-1 service/oudsm-2 ClusterIP 10.96.31.201 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 15m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm,oudsm/instance=oudsm-2 service/oudsm-lbr ClusterIP 10.96.41.201 \u0026lt;none\u0026gt; 7001/TCP,7002/TCP 73m app.kubernetes.io/instance=oudsm,app.kubernetes.io/name=oudsm Note: It will take several minutes before all the services listed above show. While the oudsm-2 pod has a READY status of 0/1 the pod is started but the OUDSM server associated with it is currently starting. While the pod is starting you can check the startup status in the pod log, by running the following command:\n$ kubectl logs oudsm-2 -n oudsmns Scaling down OUDSM pods Scaling down OUDSM pods is performed in exactly the same way as in Scaling up OUDSM pods except the replicaCount is reduced to the required number of pods.\nOnce the helm command is executed the pod(s) will move to a Terminating state. 
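For example, to scale back down to a single pod, reusing the release name (oudsm) and namespace (oudsmns) from the scale up examples above (a YAML override file can be used in the same way):\n$ helm upgrade --namespace oudsmns \\ --set replicaCount=1 \\ oudsm oudsm --reuse-values 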
In the example below replicaCount was reduced from 2 to 1 and hence oudsm-2 has moved to Terminating:\n$ kubectl get pods -n oudsmns NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 92m 10.244.0.19 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/oudsm-2 1/1 Terminating 0 19m 10.245.3.45 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; The pod will take a minute or two to stop and then will disappear:\n$ kubectl get pods -n oudsmns NAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/oudsm-1 1/1 Running 0 94m 10.244.0.19 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/manage-oam-domains/domain-lifecycle/", + "title": "a. Domain Life Cycle", + "tags": [], + "description": "Learn about the domain life cycle of an OAM domain.", + "content": " View existing OAM servers Starting/Scaling up OAM Managed servers Stopping/Scaling down OAM Managed servers Stopping and starting the Administration Server and Managed Servers Domain lifecycle sample scripts As OAM domains use the WebLogic Kubernetes Operator, domain lifecycle operations are managed using the WebLogic Kubernetes Operator itself.\nThis document shows the basic operations for starting, stopping and scaling servers in the OAM domain.\nFor more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation.\nDo not use the WebLogic Server Administration Console or Oracle Enterprise Manager Console to start or stop servers.\n View existing OAM servers The default OAM deployment starts the Administration Server (AdminServer), two OAM Managed Servers (oam_server1 and oam_server2) and two OAM Policy Manager servers (oam_policy_mgr1 and oam_policy_mgr2).\nThe deployment also creates, but doesn\u0026rsquo;t start, three extra OAM Managed Servers (oam_server3 to oam_server5) and three more OAM Policy Manager servers (oam_policy_mgr3 to oam_policy_mgr5).\nAll these servers are visible in the WebLogic Server Console https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console by navigating to Domain Structure \u0026gt; oamcluster \u0026gt; Environment \u0026gt; Servers.\nTo view the running servers using kubectl, run the following command:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h29m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h36m accessdomain-oam-policy-mgr1 1/1 Running 0 3h21m accessdomain-oam-policy-mgr2 1/1 Running 0 3h21m accessdomain-oam-server1 1/1 Running 0 3h21m accessdomain-oam-server2 1/1 Running 0 3h21m helper 1/1 Running 0 3h51m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 55m Starting/Scaling up OAM Managed Servers The number of OAM Managed Servers running is dependent on the replicas parameter configured for the cluster. To start more OAM Managed Servers, perform the following steps:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain accessdomain -n oamns Note: This opens an edit session for the domain where parameters can be changed using standard vi commands.\n In the edit session search for clusterName: oam_cluster and look for the replicas parameter. 
By default the replicas parameter is set to \u0026ldquo;2\u0026rdquo; hence two OAM Managed Servers are started (oam_server1 and oam_server2):\n clusters: - clusterName: oam_cluster replicas: 2 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) To start more OAM Managed Servers, increase the replicas value as desired. In the example below, two more managed servers will be started by setting replicas to \u0026ldquo;4\u0026rdquo;:\n clusters: - clusterName: oam_cluster replicas: 4 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) Save the file and exit (:wq!)\nThe output will look similar to the following:\ndomain.weblogic.oracle/accessdomain edited Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h33m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h40m accessdomain-oam-policy-mgr1 1/1 Running 0 3h25m accessdomain-oam-policy-mgr2 1/1 Running 0 3h25m accessdomain-oam-server1 1/1 Running 0 3h25m accessdomain-oam-server2 1/1 Running 0 3h25m accessdomain-oam-server3 0/1 Running 0 9s accessdomain-oam-server4 0/1 Running 0 9s helper 1/1 Running 0 3h55m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 59m Two new pods (accessdomain-oam-server3 and accessdomain-oam-server4) are started, but currently have a READY status of 0/1. This means oam_server3 and oam_server4 are not currently running but are in the process of starting. The servers will take several minutes to start so keep executing the command until READY shows 1/1:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h37m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h43m accessdomain-oam-policy-mgr1 1/1 Running 0 3h29m accessdomain-oam-policy-mgr2 1/1 Running 0 3h29m accessdomain-oam-server1 1/1 Running 0 3h29m accessdomain-oam-server2 1/1 Running 0 3h29m accessdomain-oam-server3 1/1 Running 0 3m45s accessdomain-oam-server4 1/1 Running 0 3m45s helper 1/1 Running 0 3h59m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 63m Note: To check what is happening during server startup when READY is 0/1, run the following command to view the log of the pod that is starting:\n$ kubectl logs \u0026lt;pod\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl logs accessdomain-oam-server3 -n oamns To start more OAM Policy Manager servers, repeat the previous commands but change the replicas parameter for the policy_cluster. In the example below replicas has been increased to \u0026ldquo;4\u0026rdquo;:\n - clusterName: policy_cluster replicas: 4 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) After saving the changes two new pods will be started (accessdomain-oam-policy-mgr3 and accessdomain-oam-policy-mgr4). After a few minutes they will have a READY status of 1/1. 
In the example below accessdomain-oam-policy-mgr3 and accessdomain-oam-policy-mgr4 are started:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h43m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h49m accessdomain-oam-policy-mgr1 1/1 Running 0 3h35m accessdomain-oam-policy-mgr2 1/1 Running 0 3h35m accessdomain-oam-policy-mgr3 1/1 Running 0 4m18s accessdomain-oam-policy-mgr4 1/1 Running 0 4m18s accessdomain-oam-server1 1/1 Running 0 3h35m accessdomain-oam-server2 1/1 Running 0 3h35m accessdomain-oam-server3 1/1 Running 0 9m27s accessdomain-oam-server4 1/1 Running 0 9m27s helper 1/1 Running 0 4h4m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 69m Stopping/Scaling down OAM Managed Servers As mentioned in the previous section, the number of OAM Managed Servers running is dependent on the replicas parameter configured for the cluster. To stop one or more OAM Managed Servers, perform the following:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain accessdomain -n oamns In the edit session search for clusterName: oam_cluster and look for the replicas parameter. In the example below replicas is set to \u0026ldquo;4\u0026rdquo;, hence four OAM Managed Servers are started (oam_server1 to oam_server4):\n clusters: - clusterName: oam_cluster replicas: 4 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) To stop OAM Managed Servers, decrease the replicas value as desired. In the example below, we will stop two managed servers by setting replicas to \u0026ldquo;2\u0026rdquo;:\n clusters: - clusterName: oam_cluster replicas: 2 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) Save the file and exit (:wq!)\n Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h45m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h51m accessdomain-oam-policy-mgr1 1/1 Running 0 3h37m accessdomain-oam-policy-mgr2 1/1 Running 0 3h37m accessdomain-oam-policy-mgr3 1/1 Running 0 6m18s accessdomain-oam-policy-mgr4 1/1 Running 0 6m18s accessdomain-oam-server1 1/1 Running 0 3h37m accessdomain-oam-server2 1/1 Running 0 3h37m accessdomain-oam-server3 1/1 Running 0 11m accessdomain-oam-server4 1/1 Terminating 0 11m helper 1/1 Running 0 4h6m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 71m One pod now has a STATUS of Terminating (accessdomain-oam-server4). The server will take a minute or two to stop. Once terminated, the other pod (accessdomain-oam-server3) will move to Terminating and then stop.
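Instead of re-running kubectl get pods manually while the scale down completes, you can optionally watch the pods shut down (press Ctrl-C to stop watching):\n$ kubectl get pods -n oamns -w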
Keep executing the command until the pods have disappeared:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h48m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h54m accessdomain-oam-policy-mgr1 1/1 Running 0 3h40m accessdomain-oam-policy-mgr2 1/1 Running 0 3h40m accessdomain-oam-policy-mgr3 1/1 Running 0 9m18s accessdomain-oam-policy-mgr4 1/1 Running 0 9m18s accessdomain-oam-server1 1/1 Running 0 3h40m accessdomain-oam-server2 1/1 Running 0 3h40m helper 1/1 Running 0 4h9m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 74m To stop OAM Policy Manager servers, repeat the previous commands but change the replicas parameter for the policy_cluster. In the example below replicas has been decreased from \u0026ldquo;4\u0026rdquo; to \u0026ldquo;2\u0026rdquo;:\n - clusterName: policy_cluster replicas: 2 serverPod: affinity: podAntiAffinity: preferredDuringSchedulingIgnoredDuringExecution: - podAffinityTerm: labelSelector: matchExpressions: - key: weblogic.clusterName operator: In values: - $(CLUSTER_NAME) After saving the changes, one pod will initially move to a STATUS of Terminating (accessdomain-oam-policy-mgr4), followed by accessdomain-oam-policy-mgr3.\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h49m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h55m accessdomain-oam-policy-mgr1 1/1 Running 0 3h41m accessdomain-oam-policy-mgr2 1/1 Running 0 3h41m accessdomain-oam-policy-mgr3 1/1 Running 0 10m accessdomain-oam-policy-mgr4 1/1 Terminating 0 10m accessdomain-oam-server1 1/1 Running 0 3h41m accessdomain-oam-server2 1/1 Running 0 3h41m helper 1/1 Running 0 4h11m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 75m The pods will take a minute or two to stop, so keep executing the command until the pods have disappeared:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 3h50m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h57m accessdomain-oam-policy-mgr1 1/1 Running 0 3h42m accessdomain-oam-policy-mgr2 1/1 Running 0 3h42m accessdomain-oam-server1 1/1 Running 0 3h42m accessdomain-oam-server2 1/1 Running 0 3h42m helper 1/1 Running 0 4h12m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 76m Stopping and Starting the Administration Server and Managed Servers To stop all the OAM Managed Servers and the Administration Server in one operation:\n Run the following kubectl command to edit the domain:\n$ kubectl edit domain \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl edit domain accessdomain -n oamns In the edit session search for serverStartPolicy: IF_NEEDED:\n volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc serverStartPolicy: IF_NEEDED webLogicCredentialsSecret: name: accessdomain-credentials Change serverStartPolicy: IF_NEEDED to NEVER as follows:\n volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc serverStartPolicy: NEVER webLogicCredentialsSecret: name: accessdomain-credentials Save the file and exit (:wq!).\n Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Terminating 0 3h52m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 3h59m accessdomain-oam-policy-mgr1 1/1 Terminating 0 3h44m
accessdomain-oam-policy-mgr2 1/1 Terminating 0 3h44m accessdomain-oam-server1 1/1 Terminating 0 3h44m accessdomain-oam-server2 1/1 Terminating 0 3h44m helper 1/1 Running 0 4h14m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 78m The Administration Server pods and Managed Server pods will move to a STATUS of Terminating. After a few minutes, run the command again and the pods should have disappeared:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h helper 1/1 Running 0 4h15m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 80m To start the Administration Server and Managed Servers up again, repeat the previous steps but change serverStartPolicy: NEVER to IF_NEEDED as follows:\n volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc serverStartPolicy: IF_NEEDED webLogicCredentialsSecret: name: accessdomain-credentials Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h1m accessdomain-introspector-jwqxw 1/1 Running 0 10s helper 1/1 Running 0 4h17m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 81m The Administration Server pod will start followed by the OAM Managed Servers pods. This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1 :\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 10m accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 4h12m accessdomain-oam-policy-mgr1 1/1 Running 0 7m35s accessdomain-oam-policy-mgr2 1/1 Running 0 7m35s accessdomain-oam-server1 1/1 Running 0 7m35s accessdomain-oam-server2 1/1 Running 0 7m35s helper 1/1 Running 0 4h28m nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 92m Domain lifecycle sample scripts The WebLogic Kubernetes Operator provides sample scripts to start up or shut down a specific Managed Server or cluster in a deployed domain, or the entire deployed domain.\nNote: Prior to running these scripts, you must have previously created and deployed the domain.\nThe scripts are located in the $WORKDIR/kubernetes/domain-lifecycle directory. For more information, see the README.\n" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/patch-and-upgrade/patch_an_image/", + "title": "a. 
Patch an image", + "tags": [], + "description": "Instructions on how to update your OAM Kubernetes cluster with a new OAM container image.", + "content": "Choose one of the following options to update your OAM Kubernetes cluster to use the new image:\n Run the kubectl edit domain command Run the kubectl patch domain command In both cases, the WebLogic Kubernetes Operator will restart the Administration Server pod first and then perform a rolling restart on the OAM Managed Servers.\nNote: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.\nRun the kubectl edit domain command To update the domain with the kubectl edit domain command, run the following:\n$ kubectl edit domain \u0026lt;domainname\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl edit domain accessdomain -n oamns If using Oracle Container Registry or your own container registry for your OAM container image, update the image to point at the new image, for example:\ndomainHomeInImage: false image: container-registry.oracle.com/middleware/oam_cpu:\u0026lt;tag\u0026gt; imagePullPolicy: IfNotPresent If you are not using a container registry and have loaded the image on each of the master and worker nodes, update the image to point at the new image:\ndomainHomeInImage: false image: oracle/oam:\u0026lt;tag\u0026gt; imagePullPolicy: IfNotPresent Save the file and exit (:wq!)\n Run the kubectl patch command To update the domain with the kubectl patch domain command, run the following:\n$ kubectl patch domain \u0026lt;domain\u0026gt; -n \u0026lt;namespace\u0026gt; --type merge -p \u0026#39;{\u0026#34;spec\u0026#34;:{\u0026#34;image\u0026#34;:\u0026#34;newimage:tag\u0026#34;}}\u0026#39; For example, if using Oracle Container Registry or your own container registry for your OAM container image:\n$ kubectl patch domain accessdomain -n oamns --type merge -p \u0026#39;{\u0026#34;spec\u0026#34;:{\u0026#34;image\u0026#34;:\u0026#34;container-registry.oracle.com/middleware/oam_cpu:\u0026lt;tag\u0026gt;\u0026#34;}}\u0026#39; For example, if you are not using a container registry and have loaded the image on each of the master and worker nodes:\n$ kubectl patch domain accessdomain -n oamns --type merge -p \u0026#39;{\u0026#34;spec\u0026#34;:{\u0026#34;image\u0026#34;:\u0026#34;oracle/oam:\u0026lt;tag\u0026gt;\u0026#34;}}\u0026#39; The output will look similar to the following:\ndomain.weblogic.oracle/accessdomain patched " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/patch-and-upgrade/patch_an_image/", + "title": "a.
Patch an image", + "tags": [], + "description": "Instructions on how to update your OIG Kubernetes cluster with a new OIG container image.", + "content": "Choose one of the following options to update your OIG Kubernetes cluster to use the new image:\n Run the kubectl edit domain command Run the kubectl patch domain command In both cases, the WebLogic Kubernetes Operator will restart the Administration Server pod first and then perform a rolling restart on the OIG Managed Servers.\nNote: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster.\nRun the kubectl edit domain command To update the domain with the kubectl edit domain command, run the following:\n$ kubectl edit domain \u0026lt;domainname\u0026gt; -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl edit domain governancedomain -n oigns If using Oracle Container Registry or your own container registry for your OIG container image, update the image to point at the new image, for example:\ndomainHomeInImage: false image: container-registry.oracle.com/middleware/oig_cpu:\u0026lt;tag\u0026gt; imagePullPolicy: IfNotPresent If you are not using a container registry and have loaded the image on each of the master and worker nodes, update the image to point at the new image:\ndomainHomeInImage: false image: oracle/oig:\u0026lt;tag\u0026gt; imagePullPolicy: IfNotPresent Save the file and exit (:wq!)\n Run the kubectl patch command To update the domain with the kubectl patch domain command, run the following:\n$ kubectl patch domain \u0026lt;domain\u0026gt; -n \u0026lt;namespace\u0026gt; --type merge -p \u0026#39;{\u0026#34;spec\u0026#34;:{\u0026#34;image\u0026#34;:\u0026#34;newimage:tag\u0026#34;}}\u0026#39; For example, if using Oracle Container Registry or your own container registry for your OIG container image:\n$ kubectl patch domain governancedomain -n oigns --type merge -p \u0026#39;{\u0026#34;spec\u0026#34;:{\u0026#34;image\u0026#34;:\u0026#34;container-registry.oracle.com/middleware/oig_cpu:\u0026lt;tag\u0026gt;\u0026#34;}}\u0026#39; For example, if you are not using a container registry and have loaded the image on each of the master and worker nodes:\n$ kubectl patch domain governancedomain -n oigns --type merge -p \u0026#39;{\u0026#34;spec\u0026#34;:{\u0026#34;image\u0026#34;:\u0026#34;oracle/oig:\u0026lt;tag\u0026gt;\u0026#34;}}\u0026#39; The output will look similar to the following:\ndomain.weblogic.oracle/governancedomain patched Patch the database schemas Once the image has been updated, you must patch the schemas in the database.\n Check to see if the helper pod exists by running:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; | grep helper For example:\n$ kubectl get pods -n oigns | grep helper The output should look similar to the following:\nhelper 1/1 Running 0 26h If the helper pod exists, delete the pod with the following command:\n$ kubectl delete pod helper -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl delete pod helper -n oigns Create a new helper pod by following the instructions in Prepare your environment.
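Once the new helper pod is running, you can optionally confirm which image it is using (this assumes the pod is named helper, as in the examples above):\n$ kubectl get pod helper -n oigns -o jsonpath=\u0026quot;{.spec.containers[0].image}\u0026quot;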
Note: The new helper pod should be started using the new image.\n Run the following command to start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n oigns -- /bin/bash This will take you into a bash shell in the running helper pod:\n[oracle@helper ~]$ In the helper bash shell run the following commands to set the environment:\n[oracle@helper oracle]$ export DB_HOST=\u0026lt;db_host.domain\u0026gt; [oracle@helper oracle]$ export DB_PORT=\u0026lt;db_port\u0026gt; [oracle@helper oracle]$ export DB_SERVICE=\u0026lt;service_name\u0026gt; [oracle@helper oracle]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; [oracle@helper oracle]$ export RCU_SCHEMA_PWD=\u0026lt;rcu_schema_pwd\u0026gt; [oracle@helper oracle]$ echo -e \u0026lt;db_pwd\u0026gt;\u0026#34;\\n\u0026#34;\u0026lt;rcu_schema_pwd\u0026gt; \u0026gt; /tmp/pwd.txt [oracle@helper oracle]$ cat /tmp/pwd.txt where:\n\u0026lt;db_host.domain\u0026gt; is the database server hostname\n\u0026lt;db_port\u0026gt; is the database listener port\n\u0026lt;service_name\u0026gt; is the database service name\n\u0026lt;rcu_schema_prefix\u0026gt; is the RCU schema prefix you want to set\n\u0026lt;rcu_schema_pwd\u0026gt; is the password you want to set for the \u0026lt;rcu_schema_prefix\u0026gt;\n\u0026lt;db_pwd\u0026gt; is the SYS password for the database\nFor example:\n[oracle@helper oracle]$ export DB_HOST=mydatabasehost.example.com [oracle@helper oracle]$ export DB_PORT=1521 [oracle@helper oracle]$ export DB_SERVICE=orcl.example.com [oracle@helper oracle]$ export RCUPREFIX=OIGK8S [oracle@helper oracle]$ export RCU_SCHEMA_PWD=\u0026lt;password\u0026gt; Run the following command to patch the schemas:\nThis command should be run if you are using an OIG image that contains OIG bundle patches. If using an OIG image without OIG bundle patches, then you can skip this step.\n [oracle@helper oracle]$ /u01/oracle/oracle_common/modules/thirdparty/org.apache.ant/1.10.5.0.0/apache-ant-1.10.5/bin/ant \\ -f /u01/oracle/idm/server/setup/deploy-files/automation.xml \\ run-patched-sql-files \\ -logger org.apache.tools.ant.NoBannerLogger \\ -logfile /u01/oracle/idm/server/bin/patch_oim_wls.log \\ -DoperationsDB.host=$DB_HOST \\ -DoperationsDB.port=$DB_PORT \\ -DoperationsDB.serviceName=$DB_SERVICE \\ -DoperationsDB.user=${RCUPREFIX}_OIM \\ -DOIM.DBPassword=$RCU_SCHEMA_PWD \\ -Dojdbc=/u01/oracle/oracle_common/modules/oracle.jdbc/ojdbc8.jar The output will look similar to the following:\nBuildfile: /u01/oracle/idm/server/setup/deploy-files/automation.xml Verify the database was patched successfully by viewing the patch_oim_wls.log:\n[oracle@helper oracle]$ cat /u01/oracle/idm/server/bin/patch_oim_wls.log The output should look similar to below:\n ... [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/StoredProcedures/OfflineDataPurge/oim_pkg_offline_datapurge_pkg_body.sql [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_dml_pty_insert_sysprop_RequestJustificationLocale.sql [sql] Executing resource: /u01/oracle/idm/server/db/oim/oracle/Upgrade/oim12cps4/list/oim12cps4_dml_pty_insert_sysprop_reportee_chain_for_mgr.sql [sql] 36 of 36 SQL statements executed successfully BUILD SUCCESSFUL Total time: 5 second " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/post-install-config/set_oimfronendurl_using_mbeans/", + "title": "a. 
Post Install Tasks", + "tags": [], + "description": "Perform post install tasks.", + "content": "Follow these post install configuration steps.\n Create a Server Overrides File Set OIMFrontendURL using MBeans Create a Server Overrides File Navigate to the following directory:\ncd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain Create a setUserOverrides.sh with the following contents:\nDERBY_FLAG=false JAVA_OPTIONS=\u0026quot;${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true\u0026quot; MEM_ARGS=\u0026quot;-Xms8192m -Xmx8192m\u0026quot; Copy the setUserOverrides.sh file to the Administration Server pod:\n$ chmod 755 setUserOverrides.sh $ kubectl cp setUserOverrides.sh oigns/governancedomain-adminserver:/u01/oracle/user_projects/domains/governancedomain/bin/setUserOverrides.sh Where oigns is the OIG namespace and governancedomain is the domain_UID.\n Stop the OIG domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NEVER\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oigns patch domains governancedomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NEVER\u0026#34; }]\u0026#39; The output will look similar to the following:\ndomain.weblogic.oracle/governancedomain patched Check that all the pods are stopped:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Terminating 0 18h governancedomain-create-fmw-infra-domain-job-8cww8 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Terminating 0 18h governancedomain-soa-server1 1/1 Terminating 0 18h helper 1/1 Running 0 41h The Administration Server pods and Managed Server pods will move to a STATUS of Terminating. 
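If you want to confirm that the patch was applied before the pods finish shutting down, the current policy can optionally be read back from the domain resource; the value returned should be NEVER:\n$ kubectl get domain governancedomain -n oigns -o jsonpath=\u0026quot;{.spec.serverStartPolicy}\u0026quot;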
After a few minutes, run the command again and the pods should have disappeared:\nNAME READY STATUS RESTARTS AGE governancedomain-create-fmw-infra-domain-job-8cww8 0/1 Completed 0 24h helper 1/1 Running 0 41h Start the domain using the following command:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; patch domains \u0026lt;domain_uid\u0026gt; --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IF_NEEDED\u0026#34; }]\u0026#39; For example:\n$ kubectl -n oigns patch domains governancedomain --type=\u0026#39;json\u0026#39; -p=\u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/serverStartPolicy\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;IF_NEEDED\u0026#34; }]\u0026#39; Run the following kubectl command to view the pods:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pods -n oigns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE governancedomain-create-fmw-infra-domain-job-vj69h 0/1 Completed 0 24h governancedomain-introspect-domain-job-7qx29 1/1 Running 0 8s helper 1/1 Running 0 41h The Administration Server pod will start followed by the OIG Managed Server pods. This process will take several minutes, so keep executing the command until all the pods are running with READY status 1/1:\nNAME READY STATUS RESTARTS AGE governancedomain-adminserver 1/1 Running 0 6m4s governancedomain-create-fmw-infra-domain-job-vj69h 0/1 Completed 0 24h governancedomain-oim-server1 1/1 Running 0 3m5s governancedomain-soa-server1 1/1 Running 0 3m5s helper 1/1 Running 0 41h Set OIMFrontendURL using MBeans Log in to Oracle Enterprise Manager using the following URL:\nhttps://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/em\n Click the Target Navigation icon in the top left of the screen and navigate to the following:\n Expand Identity and Access \u0026gt; Access \u0026gt; OIM \u0026gt; oim Right-click the instance oim and select System MBean Browser Under Application Defined MBeans, navigate to oracle.iam, Server:oim_server1 \u0026gt; Application:oim \u0026gt; XMLConfig \u0026gt; Config \u0026gt; XMLConfig.DiscoveryConfig \u0026gt; Discovery. Enter a new value for the OimFrontEndURL attribute, in the format:\n If using an External LoadBalancer for your ingress: https://${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT} If using NodePort for your ingress: http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} Then click Apply.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/", + "title": "a. Using an Ingress with NGINX (non-SSL)", + "tags": [], + "description": "Steps to set up an Ingress for NGINX to direct traffic to the OIG domain (non-SSL).", + "content": "Setting up an ingress for NGINX for the OIG domain on Kubernetes (non-SSL) The instructions below explain how to set up NGINX as an ingress for the OIG domain with non-SSL termination.\nNote: All the steps below should be performed on the master node.\n Install NGINX\na. Configure the repository\nb. Create a namespace\nc. Install NGINX using helm\nd.
Setup routing rules for the domain\n Create an ingress for the domain\n Verify that you can access the domain URL\n Install NGINX Use helm to install NGINX.\nConfigure the repository Add the Helm chart repository for NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. Happy Helming! Create a namespace Create a Kubernetes namespace for NGINX by running the following command:\n$ kubectl create namespace nginx The output will look similar to the following:\nnamespace/nginx created Install NGINX using helm If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort parameter.\nIf you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer parameter. This instructs the Managed Service to set up a Load Balancer to direct traffic to the NGINX ingress.\n To install NGINX use the following helm command depending on whether you are using NodePort or LoadBalancer:\na) Using NodePort\n$ helm install nginx-ingress -n nginx --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx The output will look similar to the following:\nNAME: nginx-ingress LAST DEPLOYED: Thu, 10 Mar 2022 14:13:33 GMT NAMESPACE: nginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed.
Get the application URL by running these commands: export HTTP_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; nginx-ingress-ingress-nginx-controller) export HTTPS_NODE_PORT=$(kubectl --namespace nginx get services -o jsonpath=\u0026quot;{.spec.ports[1].nodePort}\u0026quot; nginx-ingress-ingress-nginx-controller) export NODE_IP=$(kubectl --namespace nginx get nodes -o jsonpath=\u0026quot;{.items[0].status.addresses[1].address}\u0026quot;) echo \u0026quot;Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP.\u0026quot; echo \u0026quot;Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS.\u0026quot; An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls b) Using LoadBalancer\n$ helm install nginx-ingress -n nginx --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx The output will look similar to the following:\nNAME: nginx-ingress LAST DEPLOYED: Thu Mar 10 14:15:33 2022 NAMESPACE: nginx STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The nginx-ingress controller has been installed. It may take a few minutes for the LoadBalancer IP to be available. You can watch the status by running 'kubectl --namespace nginx get services -o wide -w nginx-ingress-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Setup routing rules for the domain Setup routing rules by running the following commands:\n$ cd $WORKDIR/kubernetes/charts/ingress-per-domain Edit values.yaml and change the domainUID parameter to match your domainUID, for example domainUID: governancedomain. Also change sslType to NONSSL. The file should look as follows:\n# Load balancer type. 
Supported values are: TRAEFIK, NGINX type: NGINX # Type of Configuration Supported Values are : NONSSL, SSL sslType: NONSSL # TimeOut value to be set for nginx parameters proxy-read-timeout and proxy-send-timeout nginxTimeOut: 180 # TLS secret name if the mode is SSL secretName: domain1-tls-cert #WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain adminServerName: AdminServer adminServerPort: 7001 soaClusterName: soa_cluster soaManagedServerPort: 8001 oimClusterName: oim_cluster oimManagedServerPort: 14000 Create an ingress for the domain Create an Ingress for the domain (governancedomain-nginx) in the domain namespace by using the sample Helm chart:\n$ cd $WORKDIR $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace \u0026lt;namespace\u0026gt; --values kubernetes/charts/ingress-per-domain/values.yaml Note: The \u0026lt;workdir\u0026gt;/samples/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-k8s1.19.yaml and nginx-ingress.yaml have nginx.ingress.kubernetes.io/enable-access-log set to false. If you want to enable access logs then set this value to true before executing the command. Enabling access-logs can cause issues with disk space if not regularly maintained.\nFor example:\n$ cd $WORKDIR $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml The output will look similar to the following:\n$ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml NAME: governancedomain-nginx LAST DEPLOYED: Thu Mar 10 14:18:23 2022 NAMESPACE: oigns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl get ing -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get ing -n oigns The output will look similar to the following:\nNAME CLASS HOSTS ADDRESS PORTS AGE governancedomain-nginx \u0026lt;none\u0026gt; * x.x.x.x 80 47s Find the NodePort of NGINX using the following command (only if you installed NGINX using NodePort):\n$ kubectl get services -n nginx -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; nginx-ingress-ingress-nginx-controller The output will look similar to the following:\n31530 Run the following command to check the ingress:\n$ kubectl describe ing governancedomain-nginx -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe ing governancedomain-nginx -n oigns The output will look similar to the following:\nName: governancedomain-nginx Namespace: oigns Address: Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * /console governancedomain-adminserver:7001 (10.244.2.59:7001) /em governancedomain-adminserver:7001 (10.244.2.59:7001) /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.60:8001) /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.60:8001) /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.60:8001) /identity governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /admin governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /oim governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /xlWebApp
governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /Nexaweb governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /iam governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /ucs governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.1.25:14000) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/enable-access-log: false Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 35s nginx-ingress-controller Scheduled for sync To confirm that the new ingress is successfully routing to the domain\u0026rsquo;s server pods, run the following command to send a request to the URL for the WebLogic ReadyApp framework:\nNote: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.\n$ curl -v http://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready For example:\na) For NodePort\n$ curl -v http://masternode.example.com:31530/weblogic/ready b) For LoadBalancer\n$ curl -v http://masternode.example.com:80/weblogic/ready The output will look similar to the following:\n$ curl -v http://masternode.example.com:31530/weblogic/ready * About to connect() to masternode.example.com port 31530 (#0) * Trying X.X.X.X... * Connected to masternode.example.com (X.X.X.X) port 31530 (#0) \u0026gt; GET /weblogic/ready HTTP/1.1 \u0026gt; User-Agent: curl/7.29.0 \u0026gt; Host: masternode.example.com:31530 \u0026gt; Accept: */* \u0026gt; \u0026lt; HTTP/1.1 200 OK \u0026lt; Server: nginx/1.19.2 \u0026lt; Date: Thu Mar 10 14:21:14 2022 \u0026lt; Content-Length: 0 \u0026lt; Connection: keep-alive \u0026lt; * Connection #0 to host masternode.example.com left intact Verify that you can access the domain URL After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 31530) as per Validate Domain URLs \n" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oud/manage-oud-containers/logging-and-visualization/", + "title": "b) Logging and Visualization for Helm Chart oud-ds-rs Deployment", + "tags": [], + "description": "Describes the steps for logging and visualization with Elasticsearch and Kibana.", + "content": " Introduction Installation Create a Kubernetes secret Enable Elasticsearch, Logstash, and Kibana Upgrade OUD deployment with ELK configuration Verify the pods Verify using the Kibana application Introduction This section describes how to install and configure logging and visualization for the oud-ds-rs Helm chart deployment.\nThe ELK stack consists of Elasticsearch, Logstash, and Kibana. 
Using ELK you can gain insights in real-time from the log data from your applications.\n Elasticsearch is a distributed, RESTful search and analytics engine capable of solving a growing number of use cases. As the heart of the Elastic Stack, it centrally stores your data so you can discover the expected and uncover the unexpected. Logstash is an open source, server-side data processing pipeline that ingests data from a multitude of sources simultaneously, transforms it, and then sends it to your favorite “stash.” Kibana lets you visualize your Elasticsearch data and navigate the Elastic Stack. It gives you the freedom to select the way you give shape to your data. And you don’t always have to know what you\u0026rsquo;re looking for. Installation ELK can be enabled for environments created using the Helm charts provided. The example below will demonstrate installation and configuration of ELK for the oud-ds-rs chart.\nCreate a Kubernetes secret A Kubernetes secret to access the required images on hub.docker.com should have been previously created in Create OUD instances.\nIf you have not yet created a Kubernetes secret refer back to Create OUD instances.\nEnable Elasticsearch, Logstash, and Kibana Create a directory on the persistent volume to store the ELK log files:\n$ mkdir -p \u0026lt;persistent_volume\u0026gt;/oud_elk_data $ chmod 777 \u0026lt;persistent_volume\u0026gt;/oud_elk_data For example:\n$ mkdir -p /scratch/shared/oud_elk_data $ chmod 777 /scratch/shared/oud_elk_data Navigate to the $WORKDIR/kubernetes/helm directory and create a logging-override-values.yaml with the following:\nelk: enabled: true imagePullSecrets: - name: dockercred elkVolume: # If enabled, it will use the persistent volume. # if value is false, PV and PVC would not be used and there would not be any mount point available for config enabled: true type: filesystem filesystem: hostPath: path: \u0026lt;persistent_volume\u0026gt;/oud_elk_data For example:\nelk: enabled: true imagePullSecrets: - name: dockercred elkVolume: # If enabled, it will use the persistent volume. # if value is false, PV and PVC would not be used and there would not be any mount point available for config enabled: true type: filesystem filesystem: hostPath: path: /scratch/shared/oud_elk_data If using NFS for the persistent volume change the elkVolume section as follows:\nelkVolume: # If enabled, it will use the persistent volume. 
# if value is false, PV and PVC would not be used and there would not be any mount point available for config enabled: true type: networkstorage networkstorage: nfs: server: myserver path: \u0026lt;persistent_volume\u0026gt;/oud_elk_data Upgrade OUD deployment with ELK configuration Run the following command to upgrade the OUD deployment with the ELK configuration:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; --values \u0026lt;valuesfile.yaml\u0026gt; \u0026lt;releasename\u0026gt; oud-ds-rs --reuse-values For example:\n$ helm upgrade --namespace oudns --values logging-override-values.yaml oud-ds-rs oud-ds-rs --reuse-values Verify the pods Run the following command to verify the elasticsearch, logstash and kibana pods are running:\n$ kubectl get pods -o wide -n \u0026lt;namespace\u0026gt; | grep \u0026#39;es\\|kibana\\|logstash\u0026#39; For example:\n$ kubectl get pods -o wide -n oudns | grep \u0026#39;es\\|kibana\\|logstash\u0026#39; The output will look similar to the following:\noud-ds-rs-es-cluster-0 1/1 Running 0 6m28s oud-ds-rs-kibana-7b7769485f-b9mr4 1/1 Running 0 6m28s oud-ds-rs-logstash-5995948d7f-nqlh6 1/1 Running 0 6m28s From the above identify the elasticsearch pod, for example: oud-ds-rs-es-cluster-0.\n Run the port-forward command to allow elasticsearch to listen on port 9200:\n$ kubectl port-forward oud-ds-rs-es-cluster-0 9200:9200 --namespace=\u0026lt;namespace\u0026gt; \u0026amp; For example:\n$ kubectl port-forward oud-ds-rs-es-cluster-0 9200:9200 --namespace=oudns \u0026amp; The output will look similar to the following:\n[1] 98458 bash-4.2$ Forwarding from 127.0.0.1:9200 -\u0026gt; 9200 Forwarding from [::1]:9200 -\u0026gt; 9200 Verify that elasticsearch is running by interrogating port 9200:\n$ curl http://localhost:9200 The output will look similar to the following:\n{ \u0026#34;name\u0026#34; : \u0026#34;oud-ds-rs-es-cluster-0\u0026#34;, \u0026#34;cluster_name\u0026#34; : \u0026#34;OUD-elk\u0026#34;, \u0026#34;cluster_uuid\u0026#34; : \u0026#34;J42fuv_XSHGy-uolRyNEtA\u0026#34;, \u0026#34;version\u0026#34; : { \u0026#34;number\u0026#34; : \u0026#34;6.8.0\u0026#34;, \u0026#34;build_flavor\u0026#34; : \u0026#34;default\u0026#34;, \u0026#34;build_type\u0026#34; : \u0026#34;docker\u0026#34;, \u0026#34;build_hash\u0026#34; : \u0026#34;65b6179\u0026#34;, \u0026#34;build_date\u0026#34; : \u0026#34;2019-05-15T20:06:13.172855Z\u0026#34;, \u0026#34;build_snapshot\u0026#34; : false, \u0026#34;lucene_version\u0026#34; : \u0026#34;7.7.0\u0026#34;, \u0026#34;minimum_wire_compatibility_version\u0026#34; : \u0026#34;5.6.0\u0026#34;, \u0026#34;minimum_index_compatibility_version\u0026#34; : \u0026#34;5.0.0\u0026#34; }, \u0026#34;tagline\u0026#34; : \u0026#34;You Know, for Search\u0026#34; } Verify using the Kibana application List the Kibana application service using the following command:\n$ kubectl get svc -o wide -n \u0026lt;namespace\u0026gt; | grep kibana For example:\n$ kubectl get svc -o wide -n oudns | grep kibana The output will look similar to the following:\noud-ds-rs-kibana NodePort 10.103.169.218 \u0026lt;none\u0026gt; 5601:31199/TCP 13m app=kibana In this example, the port to access the Kibana application is 31199.\n Access the Kibana console in a browser with: http://${MASTERNODE-HOSTNAME}:${KIBANA-PORT}/app/kibana.\n From the Kibana portal navigate to Management\u0026gt; Kibana \u0026gt; Index Patterns.\n In the Create Index Pattern page enter * for the Index pattern and click Next Step.\n In the Configure settings page, from the Time Filter field name drop 
down menu select @timestamp and click Create index pattern.\n Once the index pattern is created click on Discover in the navigation menu to view the OUD logs.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oudsm/manage-oudsm-containers/logging-and-visualization/", + "title": "b) Logging and Visualization for Helm Chart oudsm Deployment", + "tags": [], + "description": "Describes the steps for logging and visualization with Elasticsearch and Kibana.", + "content": " Introduction Installation Create a Kubernetes secret Enable Elasticsearch, Logstash, and Kibana Upgrade OUDSM deployment with ELK configuration Verify the pods Verify using the Kibana application Introduction This section describes how to install and configure logging and visualization for the oudsm Helm Chart deployment.\nThe ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK we can gain insights in real-time from the log data from your applications.\n Elasticsearch is a distributed, RESTful search and analytics engine capable of solving a growing number of use cases. As the heart of the Elastic Stack, it centrally stores your data so you can discover the expected and uncover the unexpected. Logstash is an open source, server-side data processing pipeline that ingests data from a multitude of sources simultaneously, transforms it, and then sends it to your favorite “stash.” Kibana lets you visualize your Elasticsearch data and navigate the Elastic Stack. It gives you the freedom to select the way you give shape to your data. And you don’t always have to know what you\u0026rsquo;re looking for. Installation ELK can be enabled for environments created using the Helm charts provided. The example below will demonstrate installation and configuration of ELK for the oudsm chart.\nCreate a Kubernetes secret Create a Kubernetes secret to access the required images on hub.docker.com:\nNote: You must first have a user account on hub.docker.com.\n$ kubectl create secret docker-registry \u0026#34;dockercred\u0026#34; --docker-server=\u0026#34;https://index.docker.io/v1/\u0026#34; --docker-username=\u0026#34;\u0026lt;docker_username\u0026gt;\u0026#34; --docker-password=\u0026lt;password\u0026gt; --docker-email=\u0026lt;docker_email_credentials\u0026gt; --namespace=\u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create secret docker-registry \u0026quot;dockercred\u0026quot; --docker-server=\u0026quot;https://index.docker.io/v1/\u0026quot; --docker-username=\u0026quot;username\u0026quot; --docker-password=\u0026lt;password\u0026gt; --docker-email=user@example.com --namespace=oudsmns The output will look similar to the following:\nsecret/dockercred created Enable Elasticsearch, Logstash, and Kibana Create a directory on the persistent volume to store the ELK log files:\n$ mkdir -p \u0026lt;persistent_volume\u0026gt;/oudsm_elk_data $ chmod 777 \u0026lt;persistent_volume\u0026gt;/oudsm_elk_data For example:\n$ mkdir -p /scratch/shared/oudsm_elk_data $ chmod 777 /scratch/shared/oudsm_elk_data Navigate to the $WORKDIR/kubernetes/helm directory and create a logging-override-values.yaml with the following:\nelk: enabled: true imagePullSecrets: - name: dockercred elkVolume: # If enabled, it will use the persistent volume. 
# if value is false, PV and PVC would not be used and there would not be any mount point available for config enabled: true type: filesystem filesystem: hostPath: path: \u0026lt;persistent_volume\u0026gt;/oudsm_elk_data For example:\nelk: enabled: true imagePullSecrets: - name: dockercred elkVolume: # If enabled, it will use the persistent volume. # if value is false, PV and PVC would not be used and there would not be any mount point available for config enabled: true type: filesystem filesystem: hostPath: path: /scratch/shared/oudsm_elk_data If using NFS for the persistent volume change the elkVolume section as follows:\nelkVolume: # If enabled, it will use the persistent volume. # if value is false, PV and PVC would not be used and there would not be any mount point available for config enabled: true type: networkstorage networkstorage: nfs: server: myserver path: \u0026lt;persistent_volume\u0026gt;/oudsm_elk_data Upgrade OUDSM deployment with ELK configuration Run the following command to upgrade the OUDSM deployment with the ELK configuration:\n$ helm upgrade --namespace \u0026lt;namespace\u0026gt; --values logging-override-values.yaml \u0026lt;release_name\u0026gt; oudsm --reuse-values For example:\n$ helm upgrade --namespace oudsmns --values logging-override-values.yaml oudsm oudsm --reuse-values Verify the pods Run the following command to verify the elasticsearch, logstash and kibana pods are running:\n$ kubectl get pods -o wide -n \u0026lt;namespace\u0026gt; | grep \u0026#39;es\\|kibana\\|logstash\u0026#39; For example:\n$ kubectl get pods -o wide -n oudsmns | grep \u0026#39;es\\|kibana\\|logstash\u0026#39; The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES oudsm-es-cluster-0 1/1 Running 0 4m5s 10.244.1.124 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; oudsm-kibana-7bf95b4c45-sfst6 1/1 Running 1 4m5s 10.244.2.137 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; oudsm-logstash-5bb6bc67bf-l4mdv 1/1 Running 0 4m5s 10.244.2.138 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; From the above identify the elasticsearch pod, for example: oudsm-es-cluster-0.\n Run the port-forward command to allow ElasticSearch to be listening on port 9200:\n$ kubectl port-forward oudsm-es-cluster-0 9200:9200 --namespace=\u0026lt;namespace\u0026gt; \u0026amp; For example:\n$ kubectl port-forward oudsm-es-cluster-0 9200:9200 --namespace=oudsmns \u0026amp; The output will look similar to the following:\n[1] 98458 bash-4.2$ Forwarding from 127.0.0.1:9200 -\u0026gt; 9200 Forwarding from [::1]:9200 -\u0026gt; 9200 Verify that ElasticSearch is running by interrogating port 9200:\n$ curl http://localhost:9200 The output will look similar to the following:\n{ \u0026#34;name\u0026#34; : \u0026#34;oudsm-es-cluster-0\u0026#34;, \u0026#34;cluster_name\u0026#34; : \u0026#34;OUD-elk\u0026#34;, \u0026#34;cluster_uuid\u0026#34; : \u0026#34;TIKKJuK4QdWcOZrEOA1zeQ\u0026#34;, \u0026#34;version\u0026#34; : { \u0026#34;number\u0026#34; : \u0026#34;6.8.0\u0026#34;, \u0026#34;build_flavor\u0026#34; : \u0026#34;default\u0026#34;, \u0026#34;build_type\u0026#34; : \u0026#34;docker\u0026#34;, \u0026#34;build_hash\u0026#34; : \u0026#34;65b6179\u0026#34;, \u0026#34;build_date\u0026#34; : \u0026#34;2019-05-15T20:06:13.172855Z\u0026#34;, \u0026#34;build_snapshot\u0026#34; : false, \u0026#34;lucene_version\u0026#34; : \u0026#34;7.7.0\u0026#34;, 
\u0026#34;minimum_wire_compatibility_version\u0026#34; : \u0026#34;5.6.0\u0026#34;, \u0026#34;minimum_index_compatibility_version\u0026#34; : \u0026#34;5.0.0\u0026#34; }, \u0026#34;tagline\u0026#34; : \u0026#34;You Know, for Search\u0026#34; } Verify using the Kibana application List the Kibana application service using the following command:\n$ kubectl get svc -o wide -n \u0026lt;namespace\u0026gt; | grep kibana For example:\n$ kubectl get svc -o wide -n oudsmns | grep kibana The output will look similar to the following:\noudsm-kibana NodePort 10.101.248.248 \u0026lt;none\u0026gt; 5601:31195/TCP 7m56s app=kibana In this example, the port to access Kibana application via a Web browser will be 31195.\n Access the Kibana console in a browser with: http://${MASTERNODE-HOSTNAME}:${KIBANA-PORT}/app/kibana.\n From the Kibana portal navigate to Management\u0026gt; Kibana \u0026gt; Index Patterns.\n In the Create Index Pattern page enter * for the Index pattern and click Next Step.\n In the Configure settings page, from the Time Filter field name drop down menu select @timestamp and click Create index pattern.\n Once the index pattern is created click on Discover in the navigation menu to view the OUDSM logs.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/post-install-config/install_and_configure_connectors/", + "title": "b. Install and configure connectors", + "tags": [], + "description": "Install and Configure Connectors.", + "content": "Download the connector Download the Connector you are interested in from Oracle Identity Manager Connector Downloads.\n Copy the connector zip file to a staging directory on the master node e.g. \u0026lt;workdir\u0026gt;/stage and unzip it:\n$ cp $HOME/Downloads/\u0026lt;connector\u0026gt;.zip \u0026lt;workdir\u0026gt;/\u0026lt;stage\u0026gt;/ $ cd \u0026lt;workdir\u0026gt;/\u0026lt;stage\u0026gt; $ unzip \u0026lt;connector\u0026gt;.zip $ chmod -R 755 * For example:\n$ cp $HOME/Downloads/Exchange-12.2.1.3.0.zip /scratch/OIGK8S/stage/ $ cd /scratch/OIGK8S/stage/ $ unzip exchange-12.2.1.3.0.zip $ chmod -R 755 * Copy OIG connectors There are two options to copy OIG Connectors to your Kubernetes cluster:\n a) Copy the connector directly to the Persistent Volume b) Use the kubectl cp command to copy the connector to the Persistent Volume It is recommended to use option a), however there may be cases, for example when using a Managed Service such as Oracle Kubernetes Engine on Oracle Cloud Infrastructure, where it may not be feasible to directly mount the domain directory. In such cases option b) should be used.\na) Copy the connector directly to the persistent volume Copy the connector zip file to the persistent volume. 
For example:\n$ cp -R \u0026lt;path_to\u0026gt;/\u0026lt;connector\u0026gt; \u0026lt;persistent_volume\u0026gt;/governancedomainpv/ConnectorDefaultDirectory/ For example:\n$ cp -R /scratch/OIGK8S/stage/Exchange-12.2.1.3.0 /scratch/shared/governancedomainpv/ConnectorDefaultDirectory/ b) Use the kubectl cp command to copy the connector to the persistent volume Run the following command to copy over the connector:\n$ kubectl -n \u0026lt;domain_namespace\u0026gt; cp \u0026lt;path_to\u0026gt;/\u0026lt;connector\u0026gt; \u0026lt;cluster_name\u0026gt;:/u01/oracle/idm/server/ConnectorDefaultDirectory/ For example:\n$ kubectl -n oigns cp /scratch/OIGK8S/stage/Exchange-12.2.1.3.0 governancedomain-oim-server1:/u01/oracle/idm/server/ConnectorDefaultDirectory/ Install the connector The connectors are installed as they are on a standard on-premises setup, via Application On Boarding or via Connector Installer.\nRefer to your Connector specific documentation for instructions.\n" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/patch-and-upgrade/upgrade_an_operator_release/", + "title": "b. Upgrade an operator release", + "tags": [], + "description": "Instructions on how to update the WebLogic Kubernetes Operator version.", + "content": "These instructions apply to upgrading the operator within the 3.x release family as additional versions are released.\n On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project:\n$ mkdir \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X $ cd \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X For example:\n$ mkdir /scratch/OAMK8S/weblogic-kubernetes-operator-3.X.X $ cd /scratch/OAMK8S/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X This will create the directory \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator\n Run the following helm command to upgrade the operator:\n$ cd \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.X.X --namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator For example:\n$ cd /scratch/OAMK8S/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.X.X --namespace opns --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator The output will look similar to the following:\nRelease \u0026quot;weblogic-kubernetes-operator\u0026quot; has been upgraded. Happy Helming! 
NAME: weblogic-kubernetes-operator LAST DEPLOYED: Mon Mar 7 18:36:10 2021 NAMESPACE: opns STATUS: deployed REVISION: 3 TEST SUITE: None Verify that the operator\u0026rsquo;s pod and services are running by executing the following command:\n$ kubectl get all -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl get all -n opns The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/weblogic-operator-69546866bd-h58sk 2/2 Running 0 112s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/internal-weblogic-operator-svc ClusterIP 10.106.72.42 \u0026lt;none\u0026gt; 8082/TCP 2d NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/weblogic-operator 1/1 1 1 2d NAME DESIRED CURRENT READY AGE replicaset.apps/weblogic-operator-676d5cc6f4 0 0 0 2d replicaset.apps/weblogic-operator-69546866bd 1 1 1 112s " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/patch-and-upgrade/upgrade_an_operator_release/", + "title": "b. Upgrade an operator release", + "tags": [], + "description": "Instructions on how to update the WebLogic Kubernetes Operator version.", + "content": "These instructions apply to upgrading operators within the 3.x release family as additional versions are released.\n On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project:\n$ mkdir \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X $ cd \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X For example:\n$ mkdir /scratch/OIGK8S/weblogic-kubernetes-operator-3.X.X $ cd /scratch/OIGK8S/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X This will create the directory \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator\n Run the following helm command to upgrade the operator:\n$ cd \u0026lt;workdir\u0026gt;/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.X.X --namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator For example:\n$ cd /scratch/OIGK8S/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator $ helm upgrade --reuse-values --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.X.X --namespace operator --wait weblogic-kubernetes-operator kubernetes/charts/weblogic-operator The output will look similar to the following:\nRelease \u0026quot;weblogic-kubernetes-operator\u0026quot; has been upgraded. Happy Helming! 
NAME: weblogic-kubernetes-operator LAST DEPLOYED: Tue Mar 15 09:24:40 2022 NAMESPACE: operator STATUS: deployed REVISION: 3 TEST SUITE: None Verify that the operator\u0026rsquo;s pod and services are running by executing the following command:\n$ kubectl get all -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl get all -n operator The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE pod/weblogic-operator-69546866bd-h58sk 2/2 Running 0 112s NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE service/internal-weblogic-operator-svc ClusterIP 10.106.72.42 \u0026lt;none\u0026gt; 8082/TCP 2d NAME READY UP-TO-DATE AVAILABLE AGE deployment.apps/weblogic-operator 1/1 1 1 2d NAME DESIRED CURRENT READY AGE replicaset.apps/weblogic-operator-676d5cc6f4 0 0 0 2d replicaset.apps/weblogic-operator-69546866bd 1 1 1 112s " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/", + "title": "b. Using an Ingress with NGINX (SSL)", + "tags": [], + "description": "Steps to set up an Ingress for NGINX to direct traffic to the OIG domain using SSL.", + "content": "Setting up an ingress for NGINX for the OIG domain on Kubernetes The instructions below explain how to set up NGINX as an ingress for the OIG domain with SSL termination.\nNote: All the steps below should be performed on the master node.\n Create an SSL certificate\na. Generate SSL certificate\nb. Create a Kubernetes secret for SSL\n Install NGINX\na. Configure the repository\nb. Create a namespace\nc. Install NGINX using helm\n Create an ingress for the domain\n Verify that you can access the domain URL\n Create an SSL certificate Generate SSL certificate Generate a private key and certificate signing request (CSR) using a tool of your choice. 
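For reference only: if openssl is available, a private key and CSR could be generated with a command along these lines (the CN value shown is purely illustrative; substitute your own host details):\n$ openssl req -new -newkey rsa:2048 -nodes -keyout tls.key -out tls.csr -subj \u0026#34;/CN=masternode.example.com\u0026#34; 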
Send the CSR to your certificate authority (CA) to generate the certificate.\nIf you want to use a certificate for testing purposes you can generate a self signed certificate using openssl:\n$ mkdir \u0026lt;workdir\u0026gt;/ssl $ cd \u0026lt;workdir\u0026gt;/ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \u0026#34;/CN=\u0026lt;nginx-hostname\u0026gt;\u0026#34; For example:\n$ mkdir /scratch/OIGK8S/ssl $ cd /scratch/OIGK8S/ssl $ openssl req -x509 -nodes -days 365 -newkey rsa:2048 -keyout tls.key -out tls.crt -subj \u0026#34;/CN=masternode.example.com\u0026#34; Note: The CN should match the host.domain of the master node in order to prevent hostname problems during certificate verification.\nThe output will look similar to the following:\nGenerating a 2048 bit RSA private key ..........................................+++ .......................................................................................................+++ writing new private key to 'tls.key' ----- Create a Kubernetes secret for SSL Create a secret for SSL containing the SSL certificate by running the following command:\n$ kubectl -n oigns create secret tls \u0026lt;domain_uid\u0026gt;-tls-cert --key \u0026lt;workdir\u0026gt;/tls.key --cert \u0026lt;workdir\u0026gt;/tls.crt For example:\n$ kubectl -n oigns create secret tls governancedomain-tls-cert --key /scratch/OIGK8S/ssl/tls.key --cert /scratch/OIGK8S/ssl/tls.crt The output will look similar to the following:\nsecret/governancedomain-tls-cert created Confirm that the secret is created by running the following command:\n$ kubectl get secret \u0026lt;domain_uid\u0026gt;-tls-cert -o yaml -n oigns For example:\n$ kubectl get secret governancedomain-tls-cert -o yaml -n oigns The output will look similar to the following:\napiVersion: v1 data: tls.crt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURGVENDQWYyZ0F3SUJBZ0lKQUl3ZjVRMWVxZnljTUEwR0NTcUdTSWIzRFFFQkN3VUFNQ0V4SHpBZEJnTlYKQkFNTUZtUmxiakF4WlhadkxuVnpMbTl5WVdOc1pTNWpiMjB3SGhjTk1qQXdPREV3TVRReE9UUXpXaGNOTWpFdwpPREV3TVRReE9UUXpXakFoTVI4d0hRWURWUVFEREJaa1pXNHdNV1YyYnk1MWN5NXZjbUZqYkdVdVkyOXRNSUlCCklqQU5CZ2txaGtpRzl3MEJBUUVGQUFPQ0FROEFNSUlCQ2dLQ0FRRUEyY0lpVUhwcTRVZzBhaGR6aXkycHY2cHQKSVIza2s5REd2eVRNY0syaWZQQ2dtUU5CdHV6VXNFN0l4c294eldITmU5RFpXRXJTSjVON3Ym1lTzJkMVd2NQp1aFhzbkFTbnkwY1N9xVDNQSlpDVk1MK0llZVFKdnhaVjZaWWU4V2FFL1NQSGJzczRjYy9wcG1mc3pxCnErUi83cXEyMm9ueHNHaE9vQ1h1TlQvMFF2WXVzMnNucGtueWRKRHUxelhGbDREYkFIZGMvamNVK0NPWWROeS8KT3Iza2JIV0FaTkR4OWxaZUREOTRmNXZLcUF2V0FkSVJZa2UrSmpNTHg0VHo2ZlM0VXoxbzdBSTVuSApPQ1ZMblV5U0JkaGVuWTNGNEdFU0wwbnorVlhFWjRWVjRucWNjRmo5cnJ0Q29pT1BBNlgvNGdxMEZJbi9Qd0lECkFRQUJvMUF3VGpBZEJnTlZIUTRFRmdRVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dId1lEVlIwakJCZ3cKRm9BVWw1VnVpVDBDT0xGTzcxMFBlcHRxSC9DRWZyY3dEQVlEVlIwVEJBVXdBd0VCL3pBTkJna3Foa2lHOXcwQgpBUXNGQUFPQ0FRRUFXdEN4b2ZmNGgrWXZEcVVpTFFtUnpqQkVBMHJCOUMwL1FWOG9JQzJ3d1hzYi9KaVNuMHdOCjNMdHppejc0aStEbk1yQytoNFQ3enRaSkc3NVluSGRKcmxQajgzVWdDLzhYTlFCSUNDbTFUa3RlVU1jWG0reG4KTEZEMHpReFhpVzV0N1FHcWtvK2FjeTlhUnUvN3JRMXlNSE9HdVVkTTZETzErNXF4cTdFNXFMamhyNEdKejV5OAoraW8zK25UcUVKMHFQOVRocG96RXhBMW80OEY0ZHJybWdqd3ROUldEQVpBYmYyV1JNMXFKWXhxTTJqdU1FQWNsCnFMek1TdEZUQ2o1UGFTQ0NUV1VEK3ZlSWtsRWRpaFdpRm02dzk3Y1diZ0lGMlhlNGk4L2szMmF1N2xUTDEvd28KU3Q2dHpsa20yV25uUFlVMzBnRURnVTQ4OU02Z1dybklpZz09Ci0tLS0tRU5EIENFUlRJRklDQVRFLS0tLS0K tls.key: 
LS0tLS1CRUdJTiBQUklWQVRFIEtFWS0tLS0tCk1JSUV1d0lCQURBTkJna3Foa2lHOXcwQkFRRUZBQVNDQktVd2dnU2hBZ0VBQW9JQkFRRFp3aUpRZW1yaFNEUnEKRjNPTExhbS9xbTBoSGVTVDBNYS9KTXh3cmFKODhLQ1pBMEcyN05Td1Rzakd5akhOWWMxNzBObFlTdEluazN1cApkdVo0N1ozVmEvbTZGZXljQktmTFJ4SW84NnIwSmhQYzhsa0pVd3Y0aDU1QW0vRmxYcGxoN3hab1Q5SThkdXl6Cmh4eittbVorek9xcjVIL3VxcmJhaWZHd2FFNmdKZTQxUC9SQzlpNnpheWVtU2ZKMGtPN1hOY1dYZ05zQWQxeisKTnhUNEk1aDAzTDg2dmVSc2RZQmswUEgyVmw0TVAzaC9tOHFWdW5mK1NvQzlZQjBoRmlSNzRtTXd2SGhQUHA5TApoVFBXanNBam1jYzRKVXVkVEpJRjJGNmRqY1hnWVJJdlNmUDVWY1JuaFZYaWVweHdXUDJ1dTBLaUk0OERwZi9pCkNyUVVpZjgvQWdNQkFBRUNnZjl6cnE2TUVueTFNYWFtdGM2c0laWU1QSDI5R2lSVVlwVXk5bG1sZ3BqUHh3V0sKUkRDay9Td0FmZG9yd1Q2ejNVRk1oYWJ4UU01a04vVjZFYkJlamQxT15bjdvWTVEQWJRRTR3RG9SZWlrVApONndWU0FrVC92Z1RXc1RqRlY1bXFKMCt6U2ppOWtySkZQNVNRN1F2cUswQ3BHRlNhVjY2dW8ycktiNmJWSkJYCkxPZmZPMytlS0tVazBaTnE1Q1NVQk9mbnFoNVFJSGdpaDNiMTRlNjB6bndrNWhaMHBHZE9BQm9aTkoKZ21lanUyTEdzVWxXTjBLOVdsUy9lcUllQzVzQm9jaWlocmxMVUpGWnpPRUV6LzErT2cyemhmT29yTE9rMTIrTgpjQnV0cTJWQ2I4ZFJDaFg1ZzJ0WnBrdzgzcXN5RSt3M09zYlQxa0VDZ1lFQTdxUnRLWGFONUx1SENvWlM1VWhNCm1WcnYxTEg0eGNhaDJIZnMksrMHJqQkJONGpkZkFDMmF3R3ZzU1EyR0lYRzVGYmYyK0pwL1kxbktKOEgKZU80MzNLWVgwTDE4NlNNLzFVay9HSEdTek1CWS9KdGR6WkRrbTA4UnBwaTl4bExTeDBWUWtFNVJVcnJJcTRJVwplZzBOM2RVTHZhTVl1UTBrR2dncUFETUNnWUVBNlpqWCtjU2VMZ1BVajJENWRpUGJ1TmVFd2RMeFNPZDFZMUFjCkUzQ01YTWozK2JxQ3BGUVIrTldYWWVuVmM1QiszajlSdHVnQ0YyTkNSdVdkZWowalBpL243UExIRHdCZVY0bVIKM3VQVHJmamRJbFovSFgzQ2NjVE94TmlaajU4VitFdkRHNHNHOGxtRTRieStYRExIYTJyMWxmUk9sUVRMSyswVgpyTU93eU1VQ2dZRUF1dm14WGM4NWxZRW9hU0tkU0cvQk9kMWlYSUtmc2VDZHRNT2M1elJ0UXRsSDQwS0RscE54CmxYcXBjbVc3MWpyYzk1RzVKNmE1ZG5xTE9OSFZoWW8wUEpmSXhPU052RXI2MTE5NjRBMm5sZXRHYlk0M0twUkEKaHBPRHlmdkZoSllmK29kaUJpZFUyL3ZBMCtUczNSUHJzRzBSOUVDOEZqVDNaZVhaNTF1R0xPa0NnWUFpTmU0NwplQjRxWXdrNFRsMTZmZG5xQWpaQkpLR05xY2c1V1R3alpMSkp6R3owdCtuMkl4SFd2WUZFSjdqSkNmcHFsaDlqCmlDcjJQZVV3K09QTlNUTG1JcUgydzc5L1pQQnNKWXVsZHZ4RFdGVWFlRXg1aHpkNDdmZlNRRjZNK0NHQmthYnIKVzdzU3R5V000ZFdITHpDaGZMS20yWGJBd0VqNUQrbkN1WTRrZVFLQmdFSkRHb0puM1NCRXcra2xXTE85N09aOApnc3lYQm9mUW1lRktIS2NHNzFZUFhJbTRlV1kyUi9KOCt5anc5b1FJQ3o5NlRidkdSZEN5QlJhbWhoTmFGUzVyCk9MZUc0ejVENE4zdThUc0dNem9QcU13KzBGSXJiQ3FzTnpGWTg3ekZweEdVaXZvRWZLNE82YkdERTZjNHFqNGEKNmlmK0RSRSt1TWRMWTQyYTA3ekoKLS0tLS1FTkQgUFJJVkFURSBLRVktLS0tLQo= kind: Secret metadata: creationTimestamp: \u0026quot;2022-03-10T14:02:50Z\u0026quot; name: governancedomain-tls-cert namespace: oigns resourceVersion: \u0026quot;3319899\u0026quot; uid: 274cc960-281a-494c-a3e3-d93c3abd051f type: kubernetes.io/tls Install NGINX Use helm to install NGINX.\nConfigure the repository Add the Helm chart repository for installing NGINX using the following command:\n$ helm repo add stable https://kubernetes.github.io/ingress-nginx The output will look similar to the following:\n\u0026quot;stable\u0026quot; has been added to your repositories Update the repository using the following command:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository Update Complete. Happy Helming! 
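Optionally, to confirm that the newly added repository serves the ingress-nginx chart before installing, you can search it, for example:\n$ helm search repo stable/ingress-nginx This should list the ingress-nginx chart along with its chart and app versions. 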
Create a namespace Create a Kubernetes namespace for NGINX:\n$ kubectl create namespace nginxssl The output will look similar to the following:\nnamespace/nginxssl created Install NGINX using helm If you can connect directly to the master node IP address from a browser, then install NGINX with the --set controller.service.type=NodePort parameter.\nIf you are using a Managed Service for your Kubernetes cluster, for example Oracle Kubernetes Engine (OKE) on Oracle Cloud Infrastructure (OCI), and connect from a browser to the Load Balancer IP address, then use the --set controller.service.type=LoadBalancer parameter. This instructs the Managed Service to setup a Load Balancer to direct traffic to the NGINX ingress.\n To install NGINX use the following helm command depending on if you are using NodePort or LoadBalancer:\na) Using NodePort\n$ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx The output will look similar to the following:\n$ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=NodePort --set controller.admissionWebhooks.enabled=false stable/ingress-nginx NAME: nginx-ingress LAST DEPLOYED: Thu Mar 10 14:04:40 2022 NAMESPACE: nginxssl STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The nginx-ingress controller has been installed. Get the application URL by running these commands: export HTTP_NODE_PORT=$(kubectl --namespace nginxssl get services -o jsonpath=\u0026quot;{.spec.ports[0].nodePort}\u0026quot; nginx-ingress-controller) export HTTPS_NODE_PORT=$(kubectl --namespace nginxssl get services -o jsonpath=\u0026quot;{.spec.ports[1].nodePort}\u0026quot; nginx-ingress-controller) export NODE_IP=$(kubectl --namespace nginxssl get nodes -o jsonpath=\u0026quot;{.items[0].status.addresses[1].address}\u0026quot;) echo \u0026quot;Visit http://$NODE_IP:$HTTP_NODE_PORT to access your application via HTTP.\u0026quot; echo \u0026quot;Visit https://$NODE_IP:$HTTPS_NODE_PORT to access your application via HTTPS.\u0026quot; An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: ingressClassName: example-class rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: serviceName: exampleService servicePort: 80 path: / # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls b) Using LoadBalancer\n$ helm install nginx-ingress -n nginxssl --set controller.extraArgs.default-ssl-certificate=oigns/governancedomain-tls-cert --set controller.service.type=LoadBalancer --set controller.admissionWebhooks.enabled=false stable/ingress-nginx The output will look similar to the following:\nNAME: nginx-ingress LAST DEPLOYED: Thu Mar 10 14:06:42 2022 NAMESPACE: nginxssl STATUS: deployed REVISION: 1 TEST SUITE: None NOTES: The ingress-nginx controller has been installed. 
It may take a few minutes for the LoadBalancer IP to be available. You can watch the status by running 'kubectl --namespace nginxssl get services -o wide -w nginx-ingress-ingress-nginx-controller' An example Ingress that makes use of the controller: apiVersion: networking.k8s.io/v1 kind: Ingress metadata: annotations: kubernetes.io/ingress.class: nginx name: example namespace: foo spec: rules: - host: www.example.com http: paths: - path: / pathType: Prefix backend: service: name: exampleService port: 80 # This section is only required if TLS is to be enabled for the Ingress tls: - hosts: - www.example.com secretName: example-tls If TLS is enabled for the Ingress, a Secret containing the certificate and key must also be provided: apiVersion: v1 kind: Secret metadata: name: example-tls namespace: foo data: tls.crt: \u0026lt;base64 encoded cert\u0026gt; tls.key: \u0026lt;base64 encoded key\u0026gt; type: kubernetes.io/tls Set up routing rules for the domain Set up routing rules by running the following commands:\n$ cd $WORKDIR/kubernetes/charts/ingress-per-domain Edit values.yaml and change the domainUID parameter to match your domainUID, for example domainUID: governancedomain. Change sslType to SSL and secretName to governancedomain-tls-cert. The file should look as follows:\n# Load balancer type. Supported values are: TRAEFIK, NGINX type: NGINX # Type of Configuration Supported Values are : NONSSL,SSL # tls: NONSSL tls: SSL # TLS secret name if the mode is SSL secretName: governancedomain-tls-cert # TimeOut value to be set for nginx parameters proxy-read-timeout and proxy-send-timeout nginxTimeOut: 180 # WLS domain as backend to the load balancer wlsDomain: domainUID: governancedomain adminServerName: AdminServer adminServerPort: 7001 soaClusterName: soa_cluster soaManagedServerPort: 8001 oimClusterName: oim_cluster oimManagedServerPort: 14000 Create an ingress for the domain Create an Ingress for the domain (governancedomain-nginx) in the domain namespace by using the sample Helm chart:\n$ cd $WORKDIR $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml Note: The $WORKDIR/kubernetes/charts/ingress-per-domain/templates/nginx-ingress-k8s1.19.yaml and nginx-ingress.yaml files have nginx.ingress.kubernetes.io/enable-access-log set to false. If you want to enable access logs, set this value to true before executing the command. 
Enabling access-logs can cause issues with disk space if not regularly maintained.\nFor example:\n$ cd $WORKDIR $ helm install governancedomain-nginx kubernetes/charts/ingress-per-domain --namespace oigns --values kubernetes/charts/ingress-per-domain/values.yaml The output will look similar to the following:\nNAME: governancedomain-nginx LAST DEPLOYED: Thu Mar 10 14:07:51 2022 NAMESPACE: oigns STATUS: deployed REVISION: 1 TEST SUITE: None Run the following command to show the ingress is created successfully:\n$ kubectl get ing -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get ing -n oigns The output will look similar to the following:\nNAME CLASS HOSTS ADDRESS PORTS AGE governancedomain-nginx \u0026lt;none\u0026gt; * x.x.x.x 80 49s Find the node port of NGINX using the following command:\n$ kubectl get services -n nginxssl -o jsonpath=\u0026#34;{.spec.ports[1].nodePort}\u0026#34; nginx-ingress-ingress-nginx-controller The output will look similar to the following:\n32033 Run the following command to check the ingress:\n$ kubectl describe ing governancedomain-nginx -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl describe ing governancedomain-nginx -n oigns The output will look similar to the following:\nNamespace: oigns Address: 10.96.160.58 Default backend: default-http-backend:80 (\u0026lt;error: endpoints \u0026quot;default-http-backend\u0026quot; not found\u0026gt;) Rules: Host Path Backends ---- ---- -------- * /console governancedomain-adminserver:7001 (10.244.2.96:7001) /em governancedomain-adminserver:7001 (10.244.2.96:7001) /soa governancedomain-cluster-soa-cluster:8001 (10.244.2.97:8001) /integration governancedomain-cluster-soa-cluster:8001 (10.244.2.97:8001) /soa-infra governancedomain-cluster-soa-cluster:8001 (10.244.2.97:8001) /identity governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /admin governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /oim governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /sysadmin governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /workflowservice governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /xlWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /Nexaweb governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /callbackResponseService governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /spml-xsd governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /HTTPClnt governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /reqsvc governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /iam governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /provisioning-callback governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /CertificationCallbackService governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /ucs governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /FacadeWebApp governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /OIGUI governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) /weblogic governancedomain-cluster-oim-cluster:14000 (10.244.2.98:14000) Annotations: kubernetes.io/ingress.class: nginx meta.helm.sh/release-name: governancedomain-nginx meta.helm.sh/release-namespace: oigns nginx.ingress.kubernetes.io/affinity: cookie nginx.ingress.kubernetes.io/configuration-snippet: more_set_input_headers \u0026quot;X-Forwarded-Proto: https\u0026quot;; more_set_input_headers \u0026quot;WL-Proxy-SSL: true\u0026quot;; nginx.ingress.kubernetes.io/enable-access-log: 
false nginx.ingress.kubernetes.io/ingress.allow-http: false nginx.ingress.kubernetes.io/proxy-buffer-size: 2000k Events: Type Reason Age From Message ---- ------ ---- ---- ------- Normal Sync 17s (x2 over 28s) nginx-ingress-controller Scheduled for sync To confirm that the new Ingress is successfully routing to the domain\u0026rsquo;s server pods, run the following command to send a request to the URL for the WebLogic ReadyApp framework:\nNote: If using a load balancer for your ingress replace ${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT} with ${LOADBALANCER-HOSTNAME}:${LOADBALANCER-PORT}.\n$ curl -v -k https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/weblogic/ready For example:\n$ curl -v -k https://masternode.example.com:32033/weblogic/ready The output will look similar to the following:\n$ curl -v -k https://masternode.example.com:32033/weblogic/ready * About to connect() to X.X.X.X port 32033 (#0) * Trying X.X.X.X... * Connected to masternode.example.com (X.X.X.X) port 32033 (#0) * Initializing NSS with certpath: sql:/etc/pki/nssdb * skipping SSL peer certificate verification * SSL connection using TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256 * Server certificate: * subject: CN=masternode.example.com * start date: Nov 10 13:05:21 2021 GMT * expire date: Nov 10 13:05:21 2022 GMT * common name: masternode.example.com * issuer: CN=masternode.example.com \u0026gt; GET /weblogic/ready HTTP/1.1 \u0026gt; User-Agent: curl/7.29.0 \u0026gt; Host: X.X.X.X:32033 \u0026gt; Accept: */* \u0026gt; \u0026lt; HTTP/1.1 200 OK \u0026lt; Server: nginx/1.19.1 \u0026lt; Date: Thu, 10 Mar 2022 14:09:57 GMT \u0026lt; Content-Length: 0 \u0026lt; Connection: keep-alive \u0026lt; Strict-Transport-Security: max-age=15724800; includeSubDomains \u0026lt; * Connection #0 to host X.X.X.X left intact Verify that you can access the domain URL After setting up the NGINX ingress, verify that the domain applications are accessible through the NGINX ingress port (for example 32033) as per Validate Domain URLs \n" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/manage-oam-domains/wlst-admin-operations/", + "title": "b. WLST Administration Operations", + "tags": [], + "description": "Describes the steps for WLST administration using helper pod running in the same Kubernetes Cluster as OAM Domain.", + "content": "To use WLST to administer the OAM domain, use the helper pod in the same Kubernetes cluster as the OAM Domain.\n Check to see if the helper pod exists by running:\n$ kubectl get pods -n \u0026lt;domain_namespace\u0026gt; | grep helper For example:\n$ kubectl get pods -n oamns | grep helper The output should look similar to the following:\nhelper 1/1 Running 0 26h If the helper pod doesn\u0026rsquo;t exist then see Step 1 in Prepare your environment to create it.\n Run the following command to start a bash shell in the helper pod:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it helper -n oamns -- /bin/bash This will take you into a bash shell in the running helper pod:\n[oracle@helper ~]$ Connect to WLST using the following command:\n$ cd $ORACLE_HOME/oracle_common/common/bin $ ./wlst.sh The output will look similar to the following:\nInitializing WebLogic Scripting Tool (WLST) ... Jython scans all the jar files it can find at first startup. Depending on the system, this process may take a few minutes to complete, and WLST may not return a prompt right away. 
Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; To access t3 for the Administration Server connect as follows:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3://accessdomain-adminserver:7001\u0026#39;) The output will look similar to the following:\nConnecting to t3://accessdomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. wls:/accessdomain/serverConfig/\u0026gt; Or to access t3 for the OAM Cluster service, connect as follows:\nconnect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3://accessdomain-cluster-oam-cluster:14100\u0026#39;) The output will look similar to the following:\nConnecting to t3://accessdomain-cluster-oam-cluster:14100 with userid weblogic ... Successfully connected to managed Server \u0026quot;oam_server1\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. wls:/accessdomain/serverConfig/\u0026gt; Sample operations For a full list of WLST operations refer to WebLogic Server WLST Online and Offline Command Reference.\nDisplay servers wls:/accessdomain/serverConfig/\u0026gt; cd(\u0026#39;/Servers\u0026#39;) wls:/accessdomain/serverConfig/Servers\u0026gt; ls() dr-- AdminServer dr-- oam_policy_mgr1 dr-- oam_policy_mgr2 dr-- oam_policy_mgr3 dr-- oam_policy_mgr4 dr-- oam_policy_mgr5 dr-- oam_server1 dr-- oam_server2 dr-- oam_server3 dr-- oam_server4 dr-- oam_server5 wls:/accessdomain/serverConfig/Servers\u0026gt; Configure logging for managed servers Connect to the Administration Server and run the following:\nwls:/accessdomain/serverConfig/\u0026gt; domainRuntime() Location changed to domainRuntime tree. This is a read-only tree with DomainMBean as the root MBean. 
For more help, use help(\u0026#39;domainRuntime\u0026#39;) wls:/accessdomain/domainRuntime/\u0026gt; wls:/accessdomain/domainRuntime/\u0026gt; listLoggers(pattern=\u0026#34;oracle.oam.*\u0026#34;,target=\u0026#34;oam_server1\u0026#34;) ------------------------------------------+----------------- Logger | Level ------------------------------------------+----------------- oracle.oam | \u0026lt;Inherited\u0026gt; oracle.oam.admin.foundation.configuration | \u0026lt;Inherited\u0026gt; oracle.oam.admin.service.config | \u0026lt;Inherited\u0026gt; oracle.oam.agent | \u0026lt;Inherited\u0026gt; oracle.oam.agent-default | \u0026lt;Inherited\u0026gt; oracle.oam.audit | \u0026lt;Inherited\u0026gt; oracle.oam.binding | \u0026lt;Inherited\u0026gt; oracle.oam.certvalidation | \u0026lt;Inherited\u0026gt; oracle.oam.certvalidation.mbeans | \u0026lt;Inherited\u0026gt; oracle.oam.common.healthcheck | \u0026lt;Inherited\u0026gt; oracle.oam.common.runtimeent | \u0026lt;Inherited\u0026gt; oracle.oam.commonutil | \u0026lt;Inherited\u0026gt; oracle.oam.config | \u0026lt;Inherited\u0026gt; oracle.oam.controller | \u0026lt;Inherited\u0026gt; oracle.oam.default | \u0026lt;Inherited\u0026gt; oracle.oam.diagnostic | \u0026lt;Inherited\u0026gt; oracle.oam.engine.authn | \u0026lt;Inherited\u0026gt; oracle.oam.engine.authz | \u0026lt;Inherited\u0026gt; oracle.oam.engine.policy | \u0026lt;Inherited\u0026gt; oracle.oam.engine.ptmetadata | \u0026lt;Inherited\u0026gt; oracle.oam.engine.session | \u0026lt;Inherited\u0026gt; oracle.oam.engine.sso | \u0026lt;Inherited\u0026gt; oracle.oam.esso | \u0026lt;Inherited\u0026gt; oracle.oam.extensibility.lifecycle | \u0026lt;Inherited\u0026gt; oracle.oam.foundation.access | \u0026lt;Inherited\u0026gt; oracle.oam.idm | \u0026lt;Inherited\u0026gt; oracle.oam.install | \u0026lt;Inherited\u0026gt; oracle.oam.install.bootstrap | \u0026lt;Inherited\u0026gt; oracle.oam.install.mbeans | \u0026lt;Inherited\u0026gt; oracle.oam.ipf.rest.api | \u0026lt;Inherited\u0026gt; oracle.oam.oauth | \u0026lt;Inherited\u0026gt; oracle.oam.plugin | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.oam | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.oam.workmanager | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.opensso | \u0026lt;Inherited\u0026gt; oracle.oam.pswd.service.provider | \u0026lt;Inherited\u0026gt; oracle.oam.replication | \u0026lt;Inherited\u0026gt; oracle.oam.user.identity.provider | \u0026lt;Inherited\u0026gt; wls:/accessdomain/domainRuntime/\u0026gt; Set the log level to TRACE:32:\nwls:/accessdomain/domainRuntime/\u0026gt; setLogLevel(target=\u0026#39;oam_server1\u0026#39;,logger=\u0026#39;oracle.oam\u0026#39;,level=\u0026#39;TRACE:32\u0026#39;,persist=\u0026#34;1\u0026#34;,addLogger=1) wls:/accessdomain/domainRuntime/\u0026gt; wls:/accessdomain/domainRuntime/\u0026gt; listLoggers(pattern=\u0026#34;oracle.oam.*\u0026#34;,target=\u0026#34;oam_server1\u0026#34;) ------------------------------------------+----------------- Logger | Level ------------------------------------------+----------------- oracle.oam | TRACE:32 oracle.oam.admin.foundation.configuration | \u0026lt;Inherited\u0026gt; oracle.oam.admin.service.config | \u0026lt;Inherited\u0026gt; oracle.oam.agent | \u0026lt;Inherited\u0026gt; oracle.oam.agent-default | \u0026lt;Inherited\u0026gt; oracle.oam.audit | \u0026lt;Inherited\u0026gt; oracle.oam.binding | \u0026lt;Inherited\u0026gt; oracle.oam.certvalidation | \u0026lt;Inherited\u0026gt; oracle.oam.certvalidation.mbeans | \u0026lt;Inherited\u0026gt; oracle.oam.common.healthcheck | 
\u0026lt;Inherited\u0026gt; oracle.oam.common.runtimeent | \u0026lt;Inherited\u0026gt; oracle.oam.commonutil | \u0026lt;Inherited\u0026gt; oracle.oam.config | \u0026lt;Inherited\u0026gt; oracle.oam.controller | \u0026lt;Inherited\u0026gt; oracle.oam.default | \u0026lt;Inherited\u0026gt; oracle.oam.diagnostic | \u0026lt;Inherited\u0026gt; oracle.oam.engine.authn | \u0026lt;Inherited\u0026gt; oracle.oam.engine.authz | \u0026lt;Inherited\u0026gt; oracle.oam.engine.policy | \u0026lt;Inherited\u0026gt; oracle.oam.engine.ptmetadata | \u0026lt;Inherited\u0026gt; oracle.oam.engine.session | \u0026lt;Inherited\u0026gt; oracle.oam.engine.sso | \u0026lt;Inherited\u0026gt; oracle.oam.esso | \u0026lt;Inherited\u0026gt; oracle.oam.extensibility.lifecycle | \u0026lt;Inherited\u0026gt; oracle.oam.foundation.access | \u0026lt;Inherited\u0026gt; oracle.oam.idm | \u0026lt;Inherited\u0026gt; oracle.oam.install | \u0026lt;Inherited\u0026gt; oracle.oam.install.bootstrap | \u0026lt;Inherited\u0026gt; oracle.oam.install.mbeans | \u0026lt;Inherited\u0026gt; oracle.oam.ipf.rest.api | \u0026lt;Inherited\u0026gt; oracle.oam.oauth | \u0026lt;Inherited\u0026gt; oracle.oam.plugin | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.oam | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.oam.workmanager | \u0026lt;Inherited\u0026gt; oracle.oam.proxy.opensso | \u0026lt;Inherited\u0026gt; oracle.oam.pswd.service.provider | \u0026lt;Inherited\u0026gt; oracle.oam.replication | \u0026lt;Inherited\u0026gt; oracle.oam.user.identity.provider | \u0026lt;Inherited\u0026gt; wls:/accessdomain/domainRuntime/\u0026gt; Verify that TRACE:32 log level is set by connecting to the Administration Server and viewing the logs:\n$ kubectl exec -it accessdomain-adminserver -n oamns -- /bin/bash [oracle@accessdomain-adminserver oracle]$ [oracle@accessdomain-adminserver oracle]$ cd /u01/oracle/user_projects/domains/accessdomain/servers/oam_server1/logs [oracle@accessdomain-adminserver logs]$ tail oam_server1-diagnostic.log 2022-03-07T10:26:14.793+00:00] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.observable.ObservableConfigStore$StoreWatcher] [SRC_METHOD: run] Start of run before start of detection at 1,635,848,774,793. Detector: oracle.security.am.admin.config.util.observable.DbStoreChangeDetector:Database configuration store:DSN:jdbc/oamds. 
Monitor: { StoreMonitor: { disabled: \u0026#39;false\u0026#39; } } [2022-03-07T10:26:14.793+00:00] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG_HISTORY not specified [2022-03-07T10:26:14.793+00:00] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.StoreUtil] [SRC_METHOD: getContainerProperty] Configuration property CONFIG not specified [2022-03-07T10:26:14.795+00:00] [oam_server1] [TRACE:32] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: getSelectSQL] SELECT SQL:SELECT version from IDM_OBJECT_STORE where id = ? and version = (select max(version) from IDM_OBJECT_STORE where id = ?) [2022-03-07T10:26:14.797+00:00] [oam_server1] [TRACE] [] [oracle.oam.config] [tid: Configuration Store Observer] [userId: \u0026lt;anonymous\u0026gt;] [ecid: 8b3ac37b-c7cf-46dd-aeee-5ed67886be21-0000000b,0:1795] [APP: oam_server] [partition-name: DOMAIN] [tenant-name: GLOBAL] [SRC_CLASS: oracle.security.am.admin.config.util.store.DbStore] [SRC_METHOD: load] Time (ms) to load key CONFIG:-1{FIELD_TYPES=INT, SELECT_FIELDS=SELECT version from IDM_OBJECT_STORE }:4 Performing WLST Administration via SSL By default the SSL port is not enabled for the Administration Server or OAM Managed Servers. To configure the SSL port for the Administration Server and Managed Servers, log in to the WebLogic Administration Console at https://${MASTERNODE-HOSTNAME}:${MASTERNODE-PORT}/console and navigate to Lock \u0026amp; Edit -\u0026gt; Environment -\u0026gt; Servers -\u0026gt; server_name -\u0026gt; Configuration -\u0026gt; General -\u0026gt; SSL Listen Port Enabled -\u0026gt; Provide SSL Port (for the Administration Server: 7002 and for the OAM Managed Server (oam_server1): 14101) -\u0026gt; Save -\u0026gt; Activate Changes.\nNote: If configuring the OAM Managed Servers for SSL, you must enable SSL on the same port for all servers (oam_server1 through oam_server5).\n Create a myscripts directory as follows:\n$ cd $WORKDIR/kubernetes/ $ mkdir myscripts $ cd myscripts Create a sample yaml template file in the myscripts directory called \u0026lt;domain_uid\u0026gt;-adminserver-ssl.yaml to create a Kubernetes service for the Administration Server:\nNote: Update the domainName, domainUID and namespace based on your environment. 
For example:\napiVersion: v1 kind: Service metadata: labels: serviceType: SERVER weblogic.domainName: accessdomain weblogic.domainUID: accessdomain weblogic.resourceVersion: domain-v2 weblogic.serverName: AdminServer name: accessdomain-adminserverssl namespace: oamns spec: clusterIP: None ports: - name: default port: 7002 protocol: TCP targetPort: 7002 selector: weblogic.createdByOperator: \u0026quot;true\u0026quot; weblogic.domainUID: accessdomain weblogic.serverName: AdminServer type: ClusterIP and the following sample yaml template file \u0026lt;domain_uid\u0026gt;-oamcluster-ssl.yaml for the OAM Managed Server:\napiVersion: v1 kind: Service metadata: labels: serviceType: SERVER weblogic.domainName: accessdomain weblogic.domainUID: accessdomain weblogic.resourceVersion: domain-v2 name: accessdomain-oamcluster-ssl namespace: oamns spec: clusterIP: None ports: - name: default port: 14101 protocol: TCP targetPort: 14101 selector: weblogic.clusterName: oam_cluster weblogic.createdByOperator: \u0026quot;true\u0026quot; weblogic.domainUID: accessdomain type: ClusterIP Apply the template using the following command for the AdminServer:\n$ kubectl apply -f \u0026lt;domain_uid\u0026gt;-adminserver-ssl.yaml For example:\n$ kubectl apply -f accessdomain-adminserver-ssl.yaml service/accessdomain-adminserverssl created and using the following command for the OAM Managed Server:\n$ kubectl apply -f \u0026lt;domain_uid\u0026gt;-oamcluster-ssl.yaml For example:\n$ kubectl apply -f accessdomain-oamcluster-ssl.yaml service/accessdomain-oamcluster-ssl created Validate that the Kubernetes Services to access SSL ports are created successfully:\n$ kubectl get svc -n \u0026lt;domain_namespace\u0026gt; |grep ssl For example:\n$ kubectl get svc -n oamns |grep ssl The output will look similar to the following:\naccessdomain-adminserverssl ClusterIP None \u0026lt;none\u0026gt; 7002/TCP 102s accessdomain-oamcluster-ssl ClusterIP None \u0026lt;none\u0026gt; 14101/TCP 35s Inside the bash shell of the running helper pod, run the following:\n[oracle@helper bin]$ export WLST_PROPERTIES=\u0026#34;-Dweblogic.security.SSL.ignoreHostnameVerification=true -Dweblogic.security.TrustKeyStore=DemoTrust\u0026#34; [oracle@helper bin]$ cd /u01/oracle/oracle_common/common/bin [oracle@helper bin]$ ./wlst.sh Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands wls:/offline\u0026gt; To connect to the Administration Server t3s service:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3s://accessdomain-adminserverssl:7002\u0026#39;) Connecting to t3s://accessdomain-adminserverssl:7002 with userid weblogic ... \u0026lt;Mar 7, 2022 10:42:05 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090905\u0026gt; \u0026lt;Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.\u0026gt; \u0026lt;Mar 7, 2022 10:42:05 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090906\u0026gt; \u0026lt;Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. 
To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.\u0026gt; \u0026lt;Mar 7, 2022 10:42:05 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090909\u0026gt; \u0026lt;Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.\u0026gt; Successfully connected to Admin Server \u0026#34;AdminServer\u0026#34; that belongs to domain \u0026#34;accessdomain\u0026#34;. wls:/accessdomain/serverConfig/\u0026gt; To connect to the OAM Managed Server t3s service:\nwls:/offline\u0026gt; connect(\u0026#39;weblogic\u0026#39;,\u0026#39;\u0026lt;password\u0026gt;\u0026#39;,\u0026#39;t3s://accessdomain-oamcluster-ssl:14101\u0026#39;) Connecting to t3s://accessdomain-oamcluster-ssl:14101 with userid weblogic ... \u0026lt;Mar 7, 2022 10:43:16 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090905\u0026gt; \u0026lt;Disabling the CryptoJ JCE Provider self-integrity check for better startup performance. To enable this check, specify -Dweblogic.security.allowCryptoJDefaultJCEVerification=true.\u0026gt; \u0026lt;Mar 7, 2022 10:43:16 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090906\u0026gt; \u0026lt;Changing the default Random Number Generator in RSA CryptoJ from ECDRBG128 to HMACDRBG. To disable this change, specify -Dweblogic.security.allowCryptoJDefaultPRNG=true.\u0026gt; \u0026lt;Mar 7, 2022 10:43:16 AM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;Security\u0026gt; \u0026lt;BEA-090909\u0026gt; \u0026lt;Using the configured custom SSL Hostname Verifier implementation: weblogic.security.utils.SSLWLSHostnameVerifier$NullHostnameVerifier.\u0026gt; Successfully connected to managed Server \u0026#34;oam_server1\u0026#34; that belongs to domain \u0026#34;accessdomain\u0026#34;. " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oid/manage-oid-containers/monitoring-oid-instance/", + "title": "c) Monitoring an Oracle Internet Directory Instance", + "tags": [], + "description": "Describes the steps for Monitoring the Oracle Internet Directory environment.", + "content": " Introduction Install Prometheus and Grafana Create a Kubernetes namespace Add Prometheus and Grafana Helm repositories Install the Prometheus operator View Prometheus and Grafana objects created Add the NodePort Verify using Grafana GUI Introduction After the Oracle Internet Directory instance (OID) is set up you can monitor it using Prometheus and Grafana.\nInstall Prometheus and Grafana Create a Kubernetes namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. To create your namespace issue the following command:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace monitoring The output will look similar to the following:\nnamespace/monitoring created Add Prometheus and Grafana Helm repositories Add the Prometheus and Grafana Helm repositories by issuing the following command:\n$ helm repo add prometheus https://prometheus-community.github.io/helm-charts The output will look similar to the following:\n\u0026#34;prometheus\u0026#34; has been added to your repositories Run the following command to update the repositories:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... 
...Successfully got an update from the \u0026#34;stable\u0026#34; chart repository ...Successfully got an update from the \u0026#34;prometheus\u0026#34; chart repository ...Successfully got an update from the \u0026#34;prometheus-community\u0026#34; chart repository Update Complete. Happy Helming! Install the Prometheus operator Install the Prometheus operator using the helm command:\n$ helm install \u0026lt;release_name\u0026gt; prometheus/kube-prometheus-stack -n \u0026lt;namespace\u0026gt; For example:\n$ helm install monitoring prometheus/kube-prometheus-stack -n monitoring The output should look similar to the following:\nNAME: monitoring LAST DEPLOYED: Fri Mar 18 09:57:54 2022 NAMESPACE: monitoring STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace monitoring get pods -l \u0026#34;release=monitoring\u0026#34; Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create \u0026amp; configure Alertmanager and Prometheus instances using the Operator. View Prometheus and Grafana Objects created View the objects created for Prometheus and Grafana by issuing the following command:\n$ kubectl get all,service,pod -o wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get all,service,pod -o wide -n monitoring The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/alertmanager-monitoring-kube-prometheus-alertmanager-0 2/2 Running 0 36s 10.244.1.78 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-grafana-578f79599c-qc9gd 3/3 Running 0 47s 10.244.2.200 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-kube-prometheus-operator-65cdf7995-kndgg 1/1 Running 0 47s 10.244.2.199 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-kube-state-metrics-56bfd4f44f-85l4p 1/1 Running 0 47s 10.244.1.76 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-g2x9g 1/1 Running 0 47s 100.102.48.121 \u0026lt;master-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-p9kkq 1/1 Running 0 47s 100.102.48.84 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-rzhrd 1/1 Running 0 47s 100.102.48.28 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-monitoring-kube-prometheus-prometheus-0 2/2 Running 0 35s 10.244.1.79 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/alertmanager-operated ClusterIP None \u0026lt;none\u0026gt; 9093/TCP,9094/TCP,9094/UDP 36s app.kubernetes.io/name=alertmanager service/monitoring-grafana ClusterIP 10.110.193.30 \u0026lt;none\u0026gt; 80/TCP 47s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana service/monitoring-kube-prometheus-alertmanager ClusterIP 10.104.2.37 \u0026lt;none\u0026gt; 9093/TCP 47s alertmanager=monitoring-kube-prometheus-alertmanager,app.kubernetes.io/name=alertmanager service/monitoring-kube-prometheus-operator ClusterIP 10.99.162.229 \u0026lt;none\u0026gt; 443/TCP 47s app=kube-prometheus-stack-operator,release=monitoring service/monitoring-kube-prometheus-prometheus ClusterIP 10.108.161.46 \u0026lt;none\u0026gt; 9090/TCP 47s 
app.kubernetes.io/name=prometheus,prometheus=monitoring-kube-prometheus-prometheus service/monitoring-kube-state-metrics ClusterIP 10.111.162.185 \u0026lt;none\u0026gt; 8080/TCP 47s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics service/monitoring-prometheus-node-exporter ClusterIP 10.109.21.136 \u0026lt;none\u0026gt; 9100/TCP 47s app=prometheus-node-exporter,release=monitoring service/prometheus-operated ClusterIP None \u0026lt;none\u0026gt; 9090/TCP 35s app.kubernetes.io/name=prometheus NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR daemonset.apps/monitoring-prometheus-node-exporter 3 3 3 3 3 \u0026lt;none\u0026gt; 47s node-exporter quay.io/prometheus/node-exporter:v1.3.1 app=prometheus-node-exporter,release=monitoring NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR deployment.apps/monitoring-grafana 1/1 1 1 47s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana deployment.apps/monitoring-kube-prometheus-operator 1/1 1 1 47s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,release=monitoring deployment.apps/monitoring-kube-state-metrics 1/1 1 1 47s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR replicaset.apps/monitoring-grafana-578f79599c 1 1 1 47s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana,pod-template-hash=578f79599c replicaset.apps/monitoring-kube-prometheus-operator-65cdf7995 1 1 1 47s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,pod-template-hash=65cdf7995,release=monitoring replicaset.apps/monitoring-kube-state-metrics-56bfd4f44f 1 1 1 47s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics,pod-template-hash=56bfd4f44f NAME READY AGE CONTAINERS IMAGES statefulset.apps/alertmanager-monitoring-kube-prometheus-alertmanager 1/1 36s alertmanager,config-reloader quay.io/prometheus/alertmanager:v0.23.0,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0 statefulset.apps/prometheus-monitoring-kube-prometheus-prometheus 1/1 35s prometheus,config-reloader quay.io/prometheus/prometheus:v2.33.5,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0 Add the NodePort Edit the grafana service to add the NodePort:\n$ kubectl edit service/\u0026lt;deployment_name\u0026gt;-grafana -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl edit service/monitoring-grafana -n monitoring Note: This opens an edit session for the domain where parameters can be changed using standard vi commands.\nChange the ports entry and add nodePort: 30091 and type: NodePort:\n ports: - name: http-web nodePort: 30091 port: 80 protocol: TCP targetPort: 3000 selector: app.kubernetes.io/instance: monitoring app.kubernetes.io/name: grafana sessionAffinity: None type: NodePort Save the file and exit (:wq).\n Verify Using Grafana GUI Access the Grafana GUI using 
http://\u0026lt;HostIP\u0026gt;:\u0026lt;nodeport\u0026gt; and login with admin/prom-operator. Change the password when prompted.\n Download the K8 Cluster Detail Dashboard json file from: https://grafana.com/grafana/dashboards/10856.\n Import the Grafana dashboard by navigating on the left hand menu to Create \u0026gt; Import. Click Upload JSON file and select the json downloaded file. In the Prometheus drop down box select Prometheus. Click Import. The dashboard should be displayed.\n Verify your installation by viewing some of the customized dashboard views.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oud/manage-oud-containers/monitoring-oud-instance/", + "title": "c) Monitoring an Oracle Unified Directory Instance", + "tags": [], + "description": "Describes the steps for Monitoring the Oracle Unified Directory environment.", + "content": " Introduction Install Prometheus and Grafana Create a Kubernetes namespace Add Prometheus and Grafana Helm repositories Install the Prometheus operator View Prometheus and Grafana objects created Add the NodePort Verify using Grafana GUI Introduction After the Oracle Unified Directory instance (OUD) is set up you can monitor it using Prometheus and Grafana.\nInstall Prometheus and Grafana Create a Kubernetes namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. To create your namespace issue the following command:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace monitoring The output will look similar to the following:\nnamespace/monitoring created Add Prometheus and Grafana Helm repositories Add the Prometheus and Grafana Helm repositories by issuing the following command:\n$ helm repo add prometheus https://prometheus-community.github.io/helm-charts The output will look similar to the following:\n\u0026#34;prometheus\u0026#34; has been added to your repositories Run the following command to update the repositories:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026#34;stable\u0026#34; chart repository ...Successfully got an update from the \u0026#34;prometheus\u0026#34; chart repository ...Successfully got an update from the \u0026#34;prometheus-community\u0026#34; chart repository Update Complete. Happy Helming! Install the Prometheus operator Install the Prometheus operator using the helm command:\n$ helm install \u0026lt;release_name\u0026gt; prometheus/kube-prometheus-stack -n \u0026lt;namespace\u0026gt; For example:\n$ helm install monitoring prometheus/kube-prometheus-stack -n monitoring The output should look similar to the following:\nNAME: monitoring LAST DEPLOYED: Fri Mar 18 09:57:54 2022 NAMESPACE: monitoring STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace monitoring get pods -l \u0026#34;release=monitoring\u0026#34; Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create \u0026amp; configure Alertmanager and Prometheus instances using the Operator. 
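As suggested in the install output above, you can optionally wait for the monitoring pods to reach the Running state before continuing, for example:\n$ kubectl --namespace monitoring get pods -l \u0026#34;release=monitoring\u0026#34; 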
View Prometheus and Grafana Objects created View the objects created for Prometheus and Grafana by issuing the following command:\n$ kubectl get all,service,pod -o wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get all,service,pod -o wide -n monitoring The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/alertmanager-monitoring-kube-prometheus-alertmanager-0 2/2 Running 0 36s 10.244.1.78 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-grafana-578f79599c-qc9gd 3/3 Running 0 47s 10.244.2.200 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-kube-prometheus-operator-65cdf7995-kndgg 1/1 Running 0 47s 10.244.2.199 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-kube-state-metrics-56bfd4f44f-85l4p 1/1 Running 0 47s 10.244.1.76 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-g2x9g 1/1 Running 0 47s 100.102.48.121 \u0026lt;master-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-p9kkq 1/1 Running 0 47s 100.102.48.84 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-rzhrd 1/1 Running 0 47s 100.102.48.28 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-monitoring-kube-prometheus-prometheus-0 2/2 Running 0 35s 10.244.1.79 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/alertmanager-operated ClusterIP None \u0026lt;none\u0026gt; 9093/TCP,9094/TCP,9094/UDP 36s app.kubernetes.io/name=alertmanager service/monitoring-grafana ClusterIP 10.110.193.30 \u0026lt;none\u0026gt; 80/TCP 47s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana service/monitoring-kube-prometheus-alertmanager ClusterIP 10.104.2.37 \u0026lt;none\u0026gt; 9093/TCP 47s alertmanager=monitoring-kube-prometheus-alertmanager,app.kubernetes.io/name=alertmanager service/monitoring-kube-prometheus-operator ClusterIP 10.99.162.229 \u0026lt;none\u0026gt; 443/TCP 47s app=kube-prometheus-stack-operator,release=monitoring service/monitoring-kube-prometheus-prometheus ClusterIP 10.108.161.46 \u0026lt;none\u0026gt; 9090/TCP 47s app.kubernetes.io/name=prometheus,prometheus=monitoring-kube-prometheus-prometheus service/monitoring-kube-state-metrics ClusterIP 10.111.162.185 \u0026lt;none\u0026gt; 8080/TCP 47s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics service/monitoring-prometheus-node-exporter ClusterIP 10.109.21.136 \u0026lt;none\u0026gt; 9100/TCP 47s app=prometheus-node-exporter,release=monitoring service/prometheus-operated ClusterIP None \u0026lt;none\u0026gt; 9090/TCP 35s app.kubernetes.io/name=prometheus NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR daemonset.apps/monitoring-prometheus-node-exporter 3 3 3 3 3 \u0026lt;none\u0026gt; 47s node-exporter quay.io/prometheus/node-exporter:v1.3.1 app=prometheus-node-exporter,release=monitoring NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR deployment.apps/monitoring-grafana 1/1 1 1 47s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 
app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana deployment.apps/monitoring-kube-prometheus-operator 1/1 1 1 47s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,release=monitoring deployment.apps/monitoring-kube-state-metrics 1/1 1 1 47s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR replicaset.apps/monitoring-grafana-578f79599c 1 1 1 47s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana,pod-template-hash=578f79599c replicaset.apps/monitoring-kube-prometheus-operator-65cdf7995 1 1 1 47s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,pod-template-hash=65cdf7995,release=monitoring replicaset.apps/monitoring-kube-state-metrics-56bfd4f44f 1 1 1 47s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics,pod-template-hash=56bfd4f44f NAME READY AGE CONTAINERS IMAGES statefulset.apps/alertmanager-monitoring-kube-prometheus-alertmanager 1/1 36s alertmanager,config-reloader quay.io/prometheus/alertmanager:v0.23.0,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0 statefulset.apps/prometheus-monitoring-kube-prometheus-prometheus 1/1 35s prometheus,config-reloader quay.io/prometheus/prometheus:v2.33.5,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0 Add the NodePort Edit the grafana service to add the NodePort:\n$ kubectl edit service/\u0026lt;deployment_name\u0026gt;-grafana -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl edit service/monitoring-grafana -n monitoring Note: This opens an edit session for the domain where parameters can be changed using standard vi commands.\nChange the ports entry and add nodePort: 30091 and type: NodePort:\n ports: - name: http-web nodePort: 30091 port: 80 protocol: TCP targetPort: 3000 selector: app.kubernetes.io/instance: monitoring app.kubernetes.io/name: grafana sessionAffinity: None type: NodePort Save the file and exit (:wq).\n Verify Using Grafana GUI Access the Grafana GUI using http://\u0026lt;HostIP\u0026gt;:\u0026lt;nodeport\u0026gt; and login with admin/prom-operator. Change the password when prompted.\n Download the K8 Cluster Detail Dashboard json file from: https://grafana.com/grafana/dashboards/10856.\n Import the Grafana dashboard by navigating on the left hand menu to Create \u0026gt; Import. Click Upload JSON file and select the json downloaded file. In the Prometheus drop down box select Prometheus. Click Import. 
The dashboard should be displayed.\n Verify your installation by viewing some of the customized dashboard views.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/", + "title": "c) Monitoring an Oracle Unified Directory Services Manager Instance", + "tags": [], + "description": "Describes the steps for Monitoring the Oracle Unified Directory Services Manager environment.", + "content": " Introduction Install Prometheus and Grafana Create a Kubernetes namespace Add Prometheus and Grafana Helm repositories Install the Prometheus operator View Prometheus and Grafana Objects Created Add the NodePort Verify Using Grafana GUI Introduction After the Oracle Unified Directory Services Manager instance is set up you can monitor it using Prometheus and Grafana.\nInstall Prometheus and Grafana Create a Kubernetes namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. To create your namespace issue the following command:\n$ kubectl create namespace \u0026lt;namespace\u0026gt; For example:\n$ kubectl create namespace monitoring The output will look similar to the following:\nnamespace/monitoring created Add Prometheus and Grafana Helm repositories Add the Prometheus and Grafana Helm repositories by issuing the following command:\n$ helm repo add prometheus https://prometheus-community.github.io/helm-charts The output will look similar to the following:\n\u0026#34;prometheus\u0026#34; has been added to your repositories Run the following command to update the repositories:\n$ helm repo update The output will look similar to the following:\nHang tight while we grab the latest from your chart repositories... ...Successfully got an update from the \u0026#34;stable\u0026#34; chart repository ...Successfully got an update from the \u0026#34;prometheus\u0026#34; chart repository ...Successfully got an update from the \u0026#34;prometheus-community\u0026#34; chart repository Update Complete. Happy Helming! Install the Prometheus operator Install the Prometheus operator using the helm command:\n$ helm install \u0026lt;release_name\u0026gt; prometheus/kube-prometheus-stack -n \u0026lt;namespace\u0026gt; For example:\n$ helm install monitoring prometheus/kube-prometheus-stack -n monitoring The output should look similar to the following:\nNAME: monitoring LAST DEPLOYED: Thu Mar 24 16:29:23 2022 NAMESPACE: monitoring STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace monitoring get pods -l \u0026#34;release=monitoring\u0026#34; Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create \u0026amp; configure Alertmanager and Prometheus instances using the Operator. 
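As an additional, optional sanity check you can verify that the Prometheus operator custom resource definitions were registered by the chart, for example:\n$ kubectl get crd | grep monitoring.coreos.com This would typically include CRDs such as prometheuses.monitoring.coreos.com and servicemonitors.monitoring.coreos.com. 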
View Prometheus and Grafana Objects created View the objects created for Prometheus and Grafana by issuing the following command:\n$ kubectl get all,service,pod -o wide -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get all,service,pod -o wide -n monitoring The output will look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/alertmanager-monitoring-kube-prometheus-alertmanager-0 2/2 Running 0 27s 10.244.2.141 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-grafana-578f79599c-qqdfb 2/3 Running 0 34s 10.244.1.127 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-kube-prometheus-operator-65cdf7995-w6btr 1/1 Running 0 34s 10.244.1.126 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-kube-state-metrics-56bfd4f44f-5ls8t 1/1 Running 0 34s 10.244.2.139 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-5b2f6 1/1 Running 0 34s 100.102.48.84 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-fw9xh 1/1 Running 0 34s 100.102.48.28 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/monitoring-prometheus-node-exporter-s5n9g 1/1 Running 0 34s 100.102.48.121 \u0026lt;master-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-monitoring-kube-prometheus-prometheus-0 2/2 Running 0 26s 10.244.1.128 \u0026lt;worker-node\u0026gt; \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/alertmanager-operated ClusterIP None \u0026lt;none\u0026gt; 9093/TCP,9094/TCP,9094/UDP 27s app.kubernetes.io/name=alertmanager service/monitoring-grafana ClusterIP 10.110.97.252 \u0026lt;none\u0026gt; 80/TCP 34s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana service/monitoring-kube-prometheus-alertmanager ClusterIP 10.110.82.176 \u0026lt;none\u0026gt; 9093/TCP 34s alertmanager=monitoring-kube-prometheus-alertmanager,app.kubernetes.io/name=alertmanager service/monitoring-kube-prometheus-operator ClusterIP 10.104.147.173 \u0026lt;none\u0026gt; 443/TCP 34s app=kube-prometheus-stack-operator,release=monitoring service/monitoring-kube-prometheus-prometheus ClusterIP 10.110.109.245 \u0026lt;none\u0026gt; 9090/TCP 34s app.kubernetes.io/name=prometheus,prometheus=monitoring-kube-prometheus-prometheus service/monitoring-kube-state-metrics ClusterIP 10.107.111.214 \u0026lt;none\u0026gt; 8080/TCP 34s app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics service/monitoring-prometheus-node-exporter ClusterIP 10.108.97.196 \u0026lt;none\u0026gt; 9100/TCP 34s app=prometheus-node-exporter,release=monitoring service/prometheus-operated ClusterIP None \u0026lt;none\u0026gt; 9090/TCP 26s app.kubernetes.io/name=prometheus NAME DESIRED CURRENT READY UP-TO-DATE AVAILABLE NODE SELECTOR AGE CONTAINERS IMAGES SELECTOR daemonset.apps/monitoring-prometheus-node-exporter 3 3 3 3 3 \u0026lt;none\u0026gt; 34s node-exporter quay.io/prometheus/node-exporter:v1.3.1 app=prometheus-node-exporter,release=monitoring NAME READY UP-TO-DATE AVAILABLE AGE CONTAINERS IMAGES SELECTOR deployment.apps/monitoring-grafana 0/1 1 0 34s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 
app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana deployment.apps/monitoring-kube-prometheus-operator 1/1 1 1 34s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,release=monitoring deployment.apps/monitoring-kube-state-metrics 1/1 1 1 34s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics NAME DESIRED CURRENT READY AGE CONTAINERS IMAGES SELECTOR replicaset.apps/monitoring-grafana-578f79599c 1 1 0 34s grafana-sc-dashboard,grafana-sc-datasources,grafana quay.io/kiwigrid/k8s-sidecar:1.15.6,quay.io/kiwigrid/k8s-sidecar:1.15.6,grafana/grafana:8.4.2 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=grafana,pod-template-hash=578f79599c replicaset.apps/monitoring-kube-prometheus-operator-65cdf7995 1 1 1 34s kube-prometheus-stack quay.io/prometheus-operator/prometheus-operator:v0.55.0 app=kube-prometheus-stack-operator,pod-template-hash=65cdf7995,release=monitoring replicaset.apps/monitoring-kube-state-metrics-56bfd4f44f 1 1 1 34s kube-state-metrics k8s.gcr.io/kube-state-metrics/kube-state-metrics:v2.4.1 app.kubernetes.io/instance=monitoring,app.kubernetes.io/name=kube-state-metrics,pod-template-hash=56bfd4f44f NAME READY AGE CONTAINERS IMAGES statefulset.apps/alertmanager-monitoring-kube-prometheus-alertmanager 1/1 27s alertmanager,config-reloader quay.io/prometheus/alertmanager:v0.23.0,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0 statefulset.apps/prometheus-monitoring-kube-prometheus-prometheus 1/1 26s prometheus,config-reloader quay.io/prometheus/prometheus:v2.33.5,quay.io/prometheus-operator/prometheus-config-reloader:v0.55.0 Add the NodePort Edit the grafana service to add the NodePort:\n$ kubectl edit service/\u0026lt;deployment_name\u0026gt;-grafana -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl edit service/monitoring-grafana -n monitoring Note: This opens an edit session for the domain where parameters can be changed using standard vi commands.\nChange the ports entry and add nodePort: 30091 and type: NodePort:\n ports: - name: http-web nodePort: 30091 port: 80 protocol: TCP targetPort: 3000 selector: app.kubernetes.io/instance: monitoring app.kubernetes.io/name: grafana sessionAffinity: None type: NodePort Save the file and exit (:wq).\n Verify Using Grafana GUI Access the Grafana GUI using http://\u0026lt;HostIP\u0026gt;:\u0026lt;nodeport\u0026gt; and login with admin/prom-operator. Change the password when prompted.\n Download the K8 Cluster Detail Dashboard json file from: https://grafana.com/grafana/dashboards/10856.\n Import the Grafana dashboard by navigating on the left hand menu to Create \u0026gt; Import. Click Upload JSON file and select the json downloaded file. In the Prometheus drop down box select Prometheus. Click Import. The dashboard should be displayed.\n Verify your installation by viewing some of the customized dashboard views.\n " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/manage-oam-domains/logging-and-visualization/", + "title": "c. 
Logging and Visualization", + "tags": [], + "description": "Describes the steps for logging and visualization with Elasticsearch and Kibana.", + "content": "After the OAM domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana.\nInstall Elasticsearch and Kibana If your domain namespace is anything other than oamns, edit the $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml and change all instances of oamns to your domain namespace.\n Create a Kubernetes secret to access the Elasticsearch and Kibana container images:\nNote: You must first have a user account on hub.docker.com.\n$ kubectl create secret docker-registry \u0026#34;dockercred\u0026#34; --docker-server=\u0026#34;https://index.docker.io/v1/\u0026#34; --docker-username=\u0026#34;\u0026lt;docker_username\u0026gt;\u0026#34; --docker-password=\u0026lt;password\u0026gt; --docker-email=\u0026lt;docker_email_credentials\u0026gt; --namespace=\u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl create secret docker-registry \u0026quot;dockercred\u0026quot; --docker-server=\u0026quot;https://index.docker.io/v1/\u0026quot; --docker-username=\u0026quot;username\u0026quot; --docker-password=\u0026lt;password\u0026gt; --docker-email=user@example.com --namespace=oamns The output will look similar to the following:\nsecret/dockercred created Create the Kubernetes resource using the following command:\n$ kubectl apply -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml The output will look similar to the following:\ndeployment.apps/elasticsearch created service/elasticsearch created deployment.apps/kibana created service/kibana created Run the following command to ensure Elasticsearch is used by the operator:\n$ helm get values --all weblogic-kubernetes-operator -n opns The output will look similar to the following:\nCOMPUTED VALUES: clusterSizePaddingValidationEnabled: true domainNamespaceLabelSelector: weblogic-operator=enabled domainNamespaceSelectionStrategy: LabelSelector domainNamespaces: - default elasticSearchHost: elasticsearch.default.svc.cluster.local elasticSearchPort: 9200 elkIntegrationEnabled: true enableClusterRoleBinding: true externalDebugHttpPort: 30999 externalRestEnabled: false externalRestHttpsPort: 31001 externalServiceNameSuffix: -ext image: ghcr.io/oracle/weblogic-kubernetes-operator:3.3.0 imagePullPolicy: IfNotPresent internalDebugHttpPort: 30999 introspectorJobNameSuffix: -introspector javaLoggingFileCount: 10 javaLoggingFileSizeLimit: 20000000 javaLoggingLevel: FINE logStashImage: logstash:6.6.0 remoteDebugNodePortEnabled: false serviceAccount: op-sa suspendOnDebugStartup: false To check that Elasticsearch and Kibana are deployed in the Kubernetes cluster, run the following command:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; | grep 'elasticsearch\\|kibana' For example:\n$ kubectl get pods -n oamns | grep 'elasticsearch\\|kibana' The output will look similar to the following:\nelasticsearch-f7b7c4c4-tb4pp 1/1 Running 0 85s kibana-57f6685789-mgwdl 1/1 Running 0 85s Create the logstash pod OAM Server logs can be pushed to the Elasticsearch server using the logstash pod. The logstash pod needs access to the persistent volume of the OAM domain created previously, for example accessdomain-domain-pv. 
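Note: Before creating the logstash pod, you can optionally confirm that Elasticsearch is responding. A minimal check, assuming the Elasticsearch pod name returned in the previous step:
$ kubectl exec -it elasticsearch-f7b7c4c4-tb4pp -n oamns -- curl -s http://localhost:9200 A healthy Elasticsearch instance returns a short JSON document containing the cluster name and version.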
The steps to create the logstash pod are as follows:\n Obtain the OAM domain persistence volume details:\n$ kubectl get pv -n \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl get pv -n oamns The output will look similar to the following:\nNAME CAPACITY ACCESS MODES RECLAIM POLICY STATUS CLAIM STORAGECLASS REASON AGE accessdomain-domain-pv 10Gi RWX Retain Bound oamns/accessdomain-domain-pvc accessdomain-domain-storage-class 23h Make note of the CLAIM value, for example in this case accessdomain-domain-pvc\n Run the following command to get the mountPath of your domain:\n$ kubectl describe domains \u0026lt;domain_uid\u0026gt; -n \u0026lt;domain_namespace\u0026gt; | grep \u0026#34;Mount Path\u0026#34; For example:\n$ kubectl describe domains accessdomain -n oamns | grep \u0026#34;Mount Path\u0026#34; The output will look similar to the following:\nMount Path: /u01/oracle/user_projects/domains Navigate to the $WORKDIR/kubernetes/elasticsearch-and-kibana directory and create a logstash.yaml file as follows. Change the claimName and mountPath values to match the values returned in the previous commands. Change namespace to your domain namespace e.g oamns:\napiVersion: apps/v1 kind: Deployment metadata: name: logstash-wls namespace: oamns spec: selector: matchLabels: k8s-app: logstash-wls template: # create pods using pod definition in this template metadata: labels: k8s-app: logstash-wls spec: volumes: - name: weblogic-domain-storage-volume persistentVolumeClaim: claimName: accessdomain-domain-pvc - name: shared-logs emptyDir: {} imagePullSecrets: - name: dockercred containers: - name: logstash image: logstash:6.6.0 command: [\u0026quot;/bin/sh\u0026quot;] args: [\u0026quot;/usr/share/logstash/bin/logstash\u0026quot;, \u0026quot;-f\u0026quot;, \u0026quot;/u01/oracle/user_projects/domains/logstash/logstash.conf\u0026quot;] imagePullPolicy: IfNotPresent volumeMounts: - mountPath: /u01/oracle/user_projects/domains name: weblogic-domain-storage-volume - name: shared-logs mountPath: /shared-logs ports: - containerPort: 5044 name: logstash In the NFS persistent volume directory that corresponds to the mountPath /u01/oracle/user_projects/domains, create a logstash directory. For example:\n$ mkdir -p /scratch/shared/accessdomainpv/logstash Create a logstash.conf in the newly created logstash directory that contains the following. Make sure the paths correspond to your mountPath and domain name. 
Also, if your namespace is anything other than oamns change \u0026quot;elasticsearch.oamns.svc.cluster.local:9200\u0026quot; to \u0026quot;elasticsearch.\u0026lt;namespace\u0026gt;.svc.cluster.local:9200\u0026quot;:\ninput { file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain/AdminServer*.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain/oam_policy_mgr*.log\u0026quot; tags =\u0026gt; \u0026quot;Policymanager_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/logs/accessdomain/oam_server*.log\u0026quot; tags =\u0026gt; \u0026quot;Oamserver_log\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/AdminServer-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Adminserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_policy_mgr*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Policy_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/oam_server*-diagnostic.log\u0026quot; tags =\u0026gt; \u0026quot;Oamserver_diagnostic\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/**/logs/access*.log\u0026quot; tags =\u0026gt; \u0026quot;Access_logs\u0026quot; start_position =\u0026gt; beginning } file { path =\u0026gt; \u0026quot;/u01/oracle/user_projects/domains/accessdomain/servers/AdminServer/logs/auditlogs/OAM/audit.log\u0026quot; tags =\u0026gt; \u0026quot;Audit_logs\u0026quot; start_position =\u0026gt; beginning } } filter { grok { match =\u0026gt; [ \u0026quot;message\u0026quot;, \u0026quot;\u0026lt;%{DATA:log_timestamp}\u0026gt; \u0026lt;%{WORD:log_level}\u0026gt; \u0026lt;%{WORD:thread}\u0026gt; \u0026lt;%{HOSTNAME:hostname}\u0026gt; \u0026lt;%{HOSTNAME:servername}\u0026gt; \u0026lt;%{DATA:timer}\u0026gt; \u0026lt;\u0026lt;%{DATA:kernel}\u0026gt;\u0026gt; \u0026lt;\u0026gt; \u0026lt;%{DATA:uuid}\u0026gt; \u0026lt;%{NUMBER:timestamp}\u0026gt; \u0026lt;%{DATA:misc}\u0026gt; \u0026lt;%{DATA:log_number}\u0026gt; \u0026lt;%{DATA:log_message}\u0026gt;\u0026quot; ] } if \u0026quot;_grokparsefailure\u0026quot; in [tags] { mutate { remove_tag =\u0026gt; [ \u0026quot;_grokparsefailure\u0026quot; ] } } } output { elasticsearch { hosts =\u0026gt; [\u0026quot;elasticsearch.oamns.svc.cluster.local:9200\u0026quot;] } } Deploy the logstash pod by executing the following command:\n$ kubectl create -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml The output will look similar to the following:\ndeployment.apps/logstash-wls created Run the following command to check the logstash pod is created correctly:\n$ kubectl get pods -n \u0026lt;namespace\u0026gt; For example:\n$ kubectl get pods -n oamns The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE accessdomain-adminserver 1/1 Running 0 18h accessdomain-create-oam-infra-domain-job-7c9r9 0/1 Completed 0 23h accessdomain-oam-policy-mgr1 1/1 Running 0 18h accessdomain-oam-policy-mgr2 1/1 Running 0 18h accessdomain-oam-server1 1/1 Running 1 18h accessdomain-oam-server2 1/1 Running 
1 18h elasticsearch-f7b7c4c4-tb4pp 1/1 Running 0 5m helper 1/1 Running 0 23h kibana-57f6685789-mgwdl 1/1 Running 0 5m logstash-wls-6687c5bf6-jmmdp 1/1 Running 0 12s nginx-ingress-ingress-nginx-controller-76fb7678f-k8rhq 1/1 Running 0 20h Verify and access the Kibana console Check if the indices are created correctly in the elasticsearch pod shown above:\n$ kubectl exec -it \u0026lt;elasticsearch-pod\u0026gt; -n \u0026lt;namespace\u0026gt; -- /bin/bash For example:\n$ kubectl exec -it elasticsearch-f7b7c4c4-tb4pp -n oamns -- /bin/bash This will take you into a bash shell in the elasticsearch pod:\n[root@elasticsearch-f7b7c4c4-tb4pp elasticsearch]# In the elasticsearch bash shell, run the following to check the indices:\n[root@elasticsearch-f7b7c4c4-tb4pp elasticsearch]# curl -i \u0026#34;127.0.0.1:9200/_cat/indices?v\u0026#34; The output will look similar to the following:\nHTTP/1.1 200 OK content-type: text/plain; charset=UTF-8 content-length: 696 health status index uuid pri rep docs.count docs.deleted store.size pri.store.size green open .kibana_task_manager -IPDdiajTSyIRjelI2QJIg 1 0 2 0 12.6kb 12.6kb green open .kibana_1 YI9CZAjsTsCCuAyBb1ho3A 1 0 2 0 7.6kb 7.6kb yellow open logstash-2022.03.08 4pDJSTGVR3-oOwTtHnnTkQ 5 1 148 0 173.9kb 173.9kb Exit the bash shell by typing exit.\n Find the Kibana port by running the following command:\n$ kubectl get svc -n \u0026lt;namespace\u0026gt; | grep kibana For example:\n$ kubectl get svc -n oamns | grep kibana The output will look similar to the following:\nNAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE kibana NodePort 10.104.248.203 \u0026lt;none\u0026gt; 5601:31394/TCP 11m In the example above the Kibana port is 31394.\n Access the Kibana console with http://${MASTERNODE-HOSTNAME}:${KIBANA-PORT}/app/kibana.\n Click Dashboard and in the Create index pattern page enter logstash*. Click Next Step.\n From the Time Filter field name drop down menu select @timestamp and click Create index pattern.\n Once the index pattern is created click on Discover in the navigation menu to view the logs.\n For more details on how to use the Kibana console see the Kibana Guide\nCleanup To clean up the Elasticsearch and Kibana install:\n Run the following command to delete logstash:\n$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/logstash.yaml The output will look similar to the following:\ndeployment.apps \u0026quot;logstash-wls\u0026quot; deleted Run the following command to delete Elasticsearch and Kibana:\n$ kubectl delete -f $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml The output will look similar to the following:\ndeployment.apps \u0026quot;elasticsearch\u0026quot; deleted service \u0026quot;elasticsearch\u0026quot; deleted deployment.apps \u0026quot;kibana\u0026quot; deleted service \u0026quot;kibana\u0026quot; deleted " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/categories/", + "title": "Categories", + "tags": [], + "description": "", + "content": "" +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/manage-oam-domains/monitoring-oam-domains/", + "title": "d. Monitoring an OAM domain", + "tags": [], + "description": "Describes the steps for Monitoring the OAM domain.", + "content": "After the OAM domain is set up you can monitor the OAM instance using Prometheus and Grafana. See Monitoring a domain.\nThe WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. 
It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically in the instance from which you want to get metrics.\nThere are two ways to set up monitoring and you should choose one method or the other:\n Setup automatically using setup-monitoring.sh Setup using manual configuration Setup automatically using setup-monitoring.sh The $WORKDIR/kubernetes/monitoring-service/setup-monitoring.sh sets up the monitoring for the OAM domain. It installs Prometheus, Grafana, and the WebLogic Monitoring Exporter, and deploys the web applications to the OAM domain. It also deploys the WebLogic Server Grafana dashboard.\nFor usage details, execute ./setup-monitoring.sh -h.\n Edit the $WORKDIR/kubernetes/monitoring-service/monitoring-inputs.yaml and change the domainUID, domainNamespace, and weblogicCredentialsSecretName to correspond to your deployment. For example:\nversion: create-accessdomain-monitoring-inputs-v1 # Unique ID identifying your domain. # This ID must not contain an underscore (\u0026quot;_\u0026quot;), and must be lowercase and unique across all domains in a Kubernetes cluster. domainUID: accessdomain # Name of the domain namespace domainNamespace: oamns # Boolean value indicating whether to install kube-prometheus-stack setupKubePrometheusStack: true # Additional parameters for helm install kube-prometheus-stack # Refer https://github.com/prometheus-community/helm-charts/blob/main/charts/kube-prometheus-stack/values.yaml for additional parameters # Sample : # additionalParamForKubePrometheusStack: --set nodeExporter.enabled=false --set prometheusOperator.tls.enabled=false --set prometheusOperator.admissionWebhooks.enabled=false additionalParamForKubePrometheusStack: # Name of the monitoring namespace monitoringNamespace: monitoring # Name of the Admin Server adminServerName: AdminServer # # Port number for admin server adminServerPort: 7001 # Cluster name oamClusterName: oam_cluster # Port number for managed server oamManagedServerPort: 14100 # WebLogic Monitoring Exporter to Cluster wlsMonitoringExporterTooamCluster: true # Cluster name policyClusterName: policy_cluster # Port number for managed server policyManagedServerPort: 15100 # WebLogic Monitoring Exporter to Cluster wlsMonitoringExporterTopolicyCluster: true # Boolean to indicate if the adminNodePort will be exposed exposeMonitoringNodePort: true # NodePort to expose Prometheus prometheusNodePort: 32101 # NodePort to expose Grafana grafanaNodePort: 32100 # NodePort to expose Alertmanager alertmanagerNodePort: 32102 # Name of the Kubernetes secret for the Admin Server's username and password weblogicCredentialsSecretName: accessdomain-credentials Run the following command to set up monitoring:\n$ cd $WORKDIR/kubernetes/monitoring-service $ ./setup-monitoring.sh -i monitoring-inputs.yaml The output should be similar to the following:\nMonitoring setup in monitoring in progress node/worker-node1 not labeled node/worker-node2 not labeled node/master-node not labeled Setup prometheus-community/kube-prometheus-stack started \u0026quot;prometheus-community\u0026quot; has been added to your repositories Hang tight while we grab the latest from your chart repositories... 
...Successfully got an update from the \u0026quot;stable\u0026quot; chart repository ...Successfully got an update from the \u0026quot;prometheus\u0026quot; chart repository ...Successfully got an update from the \u0026quot;prometheus-community\u0026quot; chart repository ...Successfully got an update from the \u0026quot;appscode\u0026quot; chart repository Update Complete. ⎈ Happy Helming!⎈ Setup prometheus-community/kube-prometheus-stack in progress NAME: monitoring LAST DEPLOYED: Mon Mar 7 14:13:49 2022 NAMESPACE: monitoring STATUS: deployed REVISION: 1 NOTES: kube-prometheus-stack has been installed. Check its status by running: kubectl --namespace monitoring get pods -l \u0026quot;release=monitoring\u0026quot; Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create \u0026amp; configure Alertmanager and Prometheus instances using the Operator. Setup prometheus-community/kube-prometheus-stack completed Deploy WebLogic Monitoring Exporter started Deploying WebLogic Monitoring Exporter with domainNamespace[oamns], domainUID[accessdomain], adminServerPodName[accessdomain-adminserver] % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 655 100 655 0 0 1564 0 --:--:-- --:--:-- --:--:-- 1566 100 2196k 100 2196k 0 0 2025k 0 0:00:01 0:00:01 --:--:-- 5951k created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir created /tmp/ci-EHhB7bP847 /tmp/ci-EHhB7bP847 $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service created /tmp/ci-e7wPrlLlud 14:26 /tmp/ci-e7wPrlLlud $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service created /tmp/ci-U38XXs6d06 /tmp/ci-U38XXs6d06 $WORKDIR/kubernetes/monitoring-service in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service Initializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... \u0026lt;Mar 7, 2022 2:14:31 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. 
\u0026lt;Mar 7, 2022 2:14:36 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .\u0026gt; Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed 14:27 Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ... \u0026lt;Mar 7, 2022 2:14:37 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-oam [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war], to oam_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oam. \u0026lt;Mar 7, 2022 2:14:41 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-oam [archive: null], to oam_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ... \u0026lt;Mar 7, 2022 2:14:44 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-policy [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war], to policy_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-policy. \u0026lt;Mar 7, 2022 2:14:49 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-policy [archive: null], to policy_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Disconnected from weblogic server: AdminServer Exiting WebLogic Scripting Tool. \u0026lt;Mar 7, 2022 2:14:52 PM GMT\u0026gt; \u0026lt;Warning\u0026gt; \u0026lt;JNDI\u0026gt; \u0026lt;BEA-050001\u0026gt; \u0026lt;WLContext.close() was called in a different thread than the one in which it was created.\u0026gt; 14:27 Deploy WebLogic Monitoring Exporter completed secret/basic-auth created servicemonitor.monitoring.coreos.com/wls-exporter created Deploying WebLogic Server Grafana Dashboard.... 
{\u0026quot;id\u0026quot;:25,\u0026quot;slug\u0026quot;:\u0026quot;weblogic-server-dashboard\u0026quot;,\u0026quot;status\u0026quot;:\u0026quot;success\u0026quot;,\u0026quot;uid\u0026quot;:\u0026quot;5yUwzbZWz\u0026quot;,\u0026quot;url\u0026quot;:\u0026quot;/d/5yUwzbZWz/weblogic-server-dashboard\u0026quot;,\u0026quot;version\u0026quot;:1} Deployed WebLogic Server Grafana Dashboard successfully Grafana is available at NodePort: 32100 Prometheus is available at NodePort: 32101 Alertmanager is available at NodePort: 32102 ============================================================== Prometheus service discovery After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.\n Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery\n Click on serviceMonitor/oamns/wls-exporter/0 and then show more. Verify all the targets are mentioned.\n Note: It may take several minutes for serviceMonitor/oamns/wls-exporter/0 to appear, so refresh the page until it does.\nGrafana dashboard Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and log in with admin/admin. Change your password when prompted.\n In the Dashboards panel, click on WebLogic Server Dashboard. The dashboard for your OAM domain should be displayed. If it is not displayed, click the Search icon in the left-hand menu and search for WebLogic Server Dashboard.\n Cleanup To uninstall Prometheus, Grafana, the WebLogic Monitoring Exporter, and the deployments, you can run the $WORKDIR/kubernetes/monitoring-service/delete-monitoring.sh script. For usage details, execute ./delete-monitoring.sh -h.\n To uninstall, run the following commands. For example:\n$ cd $WORKDIR/kubernetes/monitoring-service $ ./delete-monitoring.sh -i monitoring-inputs.yaml $ kubectl delete namespace monitoring Setup using manual configuration Install Prometheus, Grafana, and the WebLogic Monitoring Exporter manually. Create the web applications and deploy to the OAM domain.\nDeploy the Prometheus operator Kube-Prometheus requires all nodes to be labelled with kubernetes.io/os=linux. To check if your nodes are labelled, run the following:\n$ kubectl get nodes --show-labels If the nodes are labelled, the output will look similar to the following:\nNAME STATUS ROLES AGE VERSION LABELS worker-node1 Ready \u0026lt;none\u0026gt; 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node1,kubernetes.io/os=linux worker-node2 Ready \u0026lt;none\u0026gt; 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=worker-node2,kubernetes.io/os=linux master-node Ready master 42d v1.20.10 beta.kubernetes.io/arch=amd64,beta.kubernetes.io/os=linux,kubernetes.io/arch=amd64,kubernetes.io/hostname=master-node,kubernetes.io/os=linux,node-role.kubernetes.io/master= If the nodes are not labelled, run the following command:\n$ kubectl label nodes --all kubernetes.io/os=linux Clone Prometheus by running the following commands:\n$ cd $WORKDIR/kubernetes/monitoring-service $ git clone https://github.com/coreos/kube-prometheus.git -b v0.7.0 Note: Please refer to the compatibility matrix of Kube Prometheus. 
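For example, you can check the Kubernetes version of your cluster before choosing a kube-prometheus release (illustrative; the --short flag is deprecated in newer kubectl releases):
$ kubectl version --short Compare the reported server version against the kube-prometheus compatibility matrix before cloning a release branch.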
Please download the release of the repository according to the Kubernetes version of your cluster.\n Run the following command to create the namespace and custom resource definitions:\n$ cd kube-prometheus $ kubectl create -f manifests/setup The output will look similar to the following:\nnamespace/monitoring created customresourcedefinition.apiextensions.k8s.io/alertmanagerconfigs.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/alertmanagers.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/podmonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/probes.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/prometheuses.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/prometheusrules.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/servicemonitors.monitoring.coreos.com created customresourcedefinition.apiextensions.k8s.io/thanosrulers.monitoring.coreos.com created clusterrole.rbac.authorization.k8s.io/prometheus-operator created clusterrolebinding.rbac.authorization.k8s.io/prometheus-operator created deployment.apps/prometheus-operator created service/prometheus-operator created serviceaccount/prometheus-operator created Run the following command to created the rest of the resources:\n$ kubectl create -f manifests/ The output will look similar to the following:\nalertmanager.monitoring.coreos.com/main created prometheusrule.monitoring.coreos.com/alertmanager-main-rules created secret/alertmanager-main created service/alertmanager-main created serviceaccount/alertmanager-main created servicemonitor.monitoring.coreos.com/alertmanager-main created clusterrole.rbac.authorization.k8s.io/blackbox-exporter created clusterrolebinding.rbac.authorization.k8s.io/blackbox-exporter created configmap/blackbox-exporter-configuration created deployment.apps/blackbox-exporter created service/blackbox-exporter created serviceaccount/blackbox-exporter created servicemonitor.monitoring.coreos.com/blackbox-exporter created secret/grafana-config created secret/grafana-datasources created configmap/grafana-dashboard-alertmanager-overview created configmap/grafana-dashboard-apiserver created configmap/grafana-dashboard-cluster-total created configmap/grafana-dashboard-controller-manager created configmap/grafana-dashboard-k8s-resources-cluster created configmap/grafana-dashboard-k8s-resources-namespace created configmap/grafana-dashboard-k8s-resources-node created configmap/grafana-dashboard-k8s-resources-pod created configmap/grafana-dashboard-k8s-resources-workload created configmap/grafana-dashboard-k8s-resources-workloads-namespace created configmap/grafana-dashboard-kubelet created configmap/grafana-dashboard-namespace-by-pod created configmap/grafana-dashboard-namespace-by-workload created configmap/grafana-dashboard-node-cluster-rsrc-use created configmap/grafana-dashboard-node-rsrc-use created configmap/grafana-dashboard-nodes created configmap/grafana-dashboard-persistentvolumesusage created configmap/grafana-dashboard-pod-total created configmap/grafana-dashboard-prometheus-remote-write created configmap/grafana-dashboard-prometheus created configmap/grafana-dashboard-proxy created configmap/grafana-dashboard-scheduler created configmap/grafana-dashboard-workload-total created configmap/grafana-dashboards created deployment.apps/grafana created service/grafana created serviceaccount/grafana created servicemonitor.monitoring.coreos.com/grafana 
created prometheusrule.monitoring.coreos.com/kube-prometheus-rules created clusterrole.rbac.authorization.k8s.io/kube-state-metrics created clusterrolebinding.rbac.authorization.k8s.io/kube-state-metrics created deployment.apps/kube-state-metrics created prometheusrule.monitoring.coreos.com/kube-state-metrics-rules created service/kube-state-metrics created serviceaccount/kube-state-metrics created servicemonitor.monitoring.coreos.com/kube-state-metrics created prometheusrule.monitoring.coreos.com/kubernetes-monitoring-rules created servicemonitor.monitoring.coreos.com/kube-apiserver created servicemonitor.monitoring.coreos.com/coredns created servicemonitor.monitoring.coreos.com/kube-controller-manager created servicemonitor.monitoring.coreos.com/kube-scheduler created servicemonitor.monitoring.coreos.com/kubelet created clusterrole.rbac.authorization.k8s.io/node-exporter created clusterrolebinding.rbac.authorization.k8s.io/node-exporter created daemonset.apps/node-exporter created prometheusrule.monitoring.coreos.com/node-exporter-rules created service/node-exporter created serviceaccount/node-exporter created servicemonitor.monitoring.coreos.com/node-exporter created apiservice.apiregistration.k8s.io/v1beta1.metrics.k8s.io created clusterrole.rbac.authorization.k8s.io/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/system:aggregated-metrics-reader created clusterrolebinding.rbac.authorization.k8s.io/prometheus-adapter created clusterrolebinding.rbac.authorization.k8s.io/resource-metrics:system:auth-delegator created clusterrole.rbac.authorization.k8s.io/resource-metrics-server-resources created configmap/adapter-config created deployment.apps/prometheus-adapter created rolebinding.rbac.authorization.k8s.io/resource-metrics-auth-reader created service/prometheus-adapter created serviceaccount/prometheus-adapter created servicemonitor.monitoring.coreos.com/prometheus-adapter created clusterrole.rbac.authorization.k8s.io/prometheus-k8s created clusterrolebinding.rbac.authorization.k8s.io/prometheus-k8s created prometheusrule.monitoring.coreos.com/prometheus-operator-rules created servicemonitor.monitoring.coreos.com/prometheus-operator created prometheus.monitoring.coreos.com/k8s created prometheusrule.monitoring.coreos.com/prometheus-k8s-prometheus-rules created rolebinding.rbac.authorization.k8s.io/prometheus-k8s-config created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created rolebinding.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s-config created role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created service/prometheus-k8s created serviceaccount/prometheus-k8s created servicemonitor.monitoring.coreos.com/prometheus-k8s created unable to recognize \u0026quot;manifests/alertmanager-podDisruptionBudget.yaml\u0026quot;: no matches for kind \u0026quot;PodDisruptionBudget\u0026quot; in version \u0026quot;policy/v1\u0026quot; unable to recognize \u0026quot;manifests/prometheus-adapter-podDisruptionBudget.yaml\u0026quot;: no matches for kind \u0026quot;PodDisruptionBudget\u0026quot; in version \u0026quot;policy/v1\u0026quot; unable to recognize \u0026quot;manifests/prometheus-podDisruptionBudget.yaml\u0026quot;: no matches for kind \u0026quot;PodDisruptionBudget\u0026quot; in version \u0026quot;policy/v1\u0026quot; Provide external access for Grafana, 
Prometheus, and Alertmanager, by running the following commands:\n$ kubectl patch svc grafana -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32100 }]\u0026#39; $ kubectl patch svc prometheus-k8s -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32101 }]\u0026#39; $ kubectl patch svc alertmanager-main -n monitoring --type=json -p \u0026#39;[{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/type\u0026#34;, \u0026#34;value\u0026#34;: \u0026#34;NodePort\u0026#34; },{\u0026#34;op\u0026#34;: \u0026#34;replace\u0026#34;, \u0026#34;path\u0026#34;: \u0026#34;/spec/ports/0/nodePort\u0026#34;, \u0026#34;value\u0026#34;: 32102 }]\u0026#39; Note: This assigns port 32100 to Grafana, 32101 to Prometheus, and 32102 to Alertmanager.\nThe output will look similar to the following:\nservice/grafana patched service/prometheus-k8s patched service/alertmanager-main patched Verify that the Prometheus, Grafana, and Alertmanager pods are running in the monitoring namespace and the respective services have the exports configured correctly:\n$ kubectl get pods,services -o wide -n monitoring The output should look similar to the following:\nNAME READY STATUS RESTARTS AGE IP NODE NOMINATED NODE READINESS GATES pod/alertmanager-main-0 2/2 Running 0 67s 10.244.1.7 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/alertmanager-main-1 2/2 Running 0 67s 10.244.2.26 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/alertmanager-main-2 2/2 Running 0 67s 10.244.1.8 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/grafana-f8cd57fcf-tmlqt 1/1 Running 0 65s 10.244.2.28 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/kube-state-metrics-587bfd4f97-l8knh 3/3 Running 0 65s 10.244.1.9 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-2ztpd 2/2 Running 0 65s 10.247.95.26 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-92sxb 2/2 Running 0 65s 10.250.40.59 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/node-exporter-d77tl 2/2 Running 0 65s 10.196.54.36 master-node \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-adapter-69b8496df6-6gqrz 1/1 Running 0 65s 10.244.2.29 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-k8s-0 2/2 Running 1 66s 10.244.2.27 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-k8s-1 2/2 Running 1 66s 10.244.1.10 worker-node1 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; pod/prometheus-operator-7649c7454f-9p747 2/2 Running 0 2m 10.244.2.25 worker-node2 \u0026lt;none\u0026gt; \u0026lt;none\u0026gt; NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE SELECTOR service/alertmanager-main NodePort 10.104.92.62 \u0026lt;none\u0026gt; 9093:32102/TCP 67s alertmanager=main,app=alertmanager service/alertmanager-operated ClusterIP None \u0026lt;none\u0026gt; 9093/TCP,9094/TCP,9094/UDP 67s 
app=alertmanager service/grafana NodePort 10.100.171.3 \u0026lt;none\u0026gt; 3000:32100/TCP 66s app=grafana service/kube-state-metrics ClusterIP None \u0026lt;none\u0026gt; 8443/TCP,9443/TCP 66s app.kubernetes.io/name=kube-state-metrics service/node-exporter ClusterIP None \u0026lt;none\u0026gt; 9100/TCP 66s app.kubernetes.io/name=node-exporter service/prometheus-adapter ClusterIP 10.109.248.92 \u0026lt;none\u0026gt; 443/TCP 66s name=prometheus-adapter service/prometheus-k8s NodePort 10.98.212.247 \u0026lt;none\u0026gt; 9090:32101/TCP 66s app=prometheus,prometheus=k8s service/prometheus-operated ClusterIP None \u0026lt;none\u0026gt; 9090/TCP 66s app=prometheus service/prometheus-operator ClusterIP None \u0026lt;none\u0026gt; 8443/TCP 2m1s app.kubernetes.io/component=controller,app.kubernetes.io/name=prometheus-operator Deploy WebLogic Monitoring Exporter Generate the WebLogic Monitoring Exporter deployment package. The wls-exporter.war package need to be updated and created for each listening port (Administration Server and Managed Servers) in the domain. Set the below environment values and run the script get-wls-exporter.sh to generate the required WAR files at ${WORKDIR}/kubernetes/monitoring-service/scripts/wls-exporter-deploy:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ export adminServerPort=7001 $ export wlsMonitoringExporterTopolicyCluster=true $ export policyManagedServerPort=15100 $ export wlsMonitoringExporterTooamCluster=true $ export oamManagedServerPort=14100 $ sh get-wls-exporter.sh The output will look similar to the following:\n % Total % Received % Xferd Average Speed Time Time Time Current Dload Upload Total Spent Left Speed 100 655 100 655 0 0 1107 0 --:--:-- --:--:-- --:--:-- 1108 100 2196k 100 2196k 0 0 1787k 0 0:00:01 0:00:01 --:--:-- 9248k created $WORKDIR/kubernetes/monitoring-service/scripts/wls-exporter-deploy dir domainNamespace is empty, setting to default oamns domainUID is empty, setting to default accessdomain weblogicCredentialsSecretName is empty, setting to default \u0026quot;accessdomain-domain-credentials\u0026quot; adminServerName is empty, setting to default \u0026quot;AdminServer\u0026quot; oamClusterName is empty, setting to default \u0026quot;oam_cluster\u0026quot; policyClusterName is empty, setting to default \u0026quot;policy_cluster\u0026quot; created /tmp/ci-Bu74rCBxwu /tmp/ci-Bu74rCBxwu $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts created /tmp/ci-RQv3rLbLsX /tmp/ci-RQv3rLbLsX $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts created /tmp/ci-DWIYlocP5e /tmp/ci-DWIYlocP5e $WORKDIR/kubernetes/monitoring-service/scripts in temp dir adding: WEB-INF/weblogic.xml (deflated 61%) adding: config.yml (deflated 60%) $WORKDIR/kubernetes/monitoring-service/scripts Deploy the WebLogic Monitoring Exporter WAR files into the Oracle Access Management domain:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ kubectl cp wls-exporter-deploy \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle $ kubectl cp deploy-weblogic-monitoring-exporter.py \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n \u0026lt;domain_namespace\u0026gt; 
\u0026lt;domain_uid\u0026gt;-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName \u0026lt;domain_uid\u0026gt; -adminServerName AdminServer -adminURL \u0026lt;domain_uid\u0026gt;-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true For example:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts $ kubectl cp wls-exporter-deploy oamns/accessdomain-adminserver:/u01/oracle $ kubectl cp deploy-weblogic-monitoring-exporter.py oamns/accessdomain-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n oamns accessdomain-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/deploy-weblogic-monitoring-exporter.py -domainName accessdomain -adminServerName AdminServer -adminURL accessdomain-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true The output will look similar to the following:\nInitializing WebLogic Scripting Tool (WLST) ... Welcome to WebLogic Server Administration Scripting Shell Type help() for help on available commands Connecting to t3://accessdomain-adminserver:7001 with userid weblogic ... Successfully connected to Admin Server \u0026quot;AdminServer\u0026quot; that belongs to domain \u0026quot;accessdomain\u0026quot;. Warning: An insecure protocol was used to connect to the server. To ensure on-the-wire security, the SSL port or Admin port should be used instead. Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war to targets AdminServer (upload=true) ... \u0026lt;Mar 7, 2022 3:38:15 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-adminserver [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-adminserver.war], to AdminServer .\u0026gt; ..Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-adminserver. \u0026lt;Mar 7, 2022 3:38:25 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-adminserver [archive: null], to AdminServer .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war to targets oam_cluster (upload=true) ... 
\u0026lt;Mar 7, 2022 3:38:28 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-oam [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-oam.war], to oam_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-oam. \u0026lt;Mar 7, 2022 3:38:34 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-oam [archive: null], to oam_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Deploying ......... Deploying application from /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war to targets policy_cluster (upload=true) ... \u0026lt;Mar 7, 2022 3:38:38 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating deploy operation for application, wls-exporter-policy [archive: /u01/oracle/wls-exporter-deploy/wls-exporter-policy.war], to policy_cluster .\u0026gt; .Completed the deployment of Application with status completed Current Status of your Deployment: Deployment command type: deploy Deployment State : completed Deployment Message : no message Starting application wls-exporter-policy. \u0026lt;Mar 7, 2022 3:38:44 PM GMT\u0026gt; \u0026lt;Info\u0026gt; \u0026lt;J2EE Deployment SPI\u0026gt; \u0026lt;BEA-260121\u0026gt; \u0026lt;Initiating start operation for application, wls-exporter-policy [archive: null], to policy_cluster .\u0026gt; .Completed the start of Application with status completed Current Status of your Deployment: Deployment command type: start Deployment State : completed Deployment Message : no message Disconnected from weblogic server: AdminServer Exiting WebLogic Scripting Tool. \u0026lt;Mar 7, 2022 3:38:47 PM GMT\u0026gt; \u0026lt;Warning\u0026gt; \u0026lt;JNDI\u0026gt; \u0026lt;BEA-050001\u0026gt; \u0026lt;WLContext.close() was called in a different thread than the one in which it was created.\u0026gt; Configure Prometheus Operator Prometheus enables you to collect metrics from the WebLogic Monitoring Exporter. The Prometheus Operator identifies the targets using service discovery. To get the WebLogic Monitoring Exporter end point discovered as a target, you must create a service monitor pointing to the service.\nThe exporting of metrics from wls-exporter requires basicAuth, so a Kubernetes Secret is created with the user name and password that are base64 encoded. This Secret is used in the ServiceMonitor deployment. The wls-exporter-ServiceMonitor.yaml has basicAuth with credentials as username: weblogic and password: \u0026lt;password\u0026gt; in base64 encoded.\n Run the following command to get the base64 encoded version of the weblogic password:\n$ echo -n \u0026#34;\u0026lt;password\u0026gt;\u0026#34; | base64 The output will look similar to the following:\nV2VsY29tZTE= Update the $WORKDIR/kubernetes/monitoring-service/manifests/wls-exporter-ServiceMonitor.yaml and change the password: value to the value returned above. 
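If you want to double-check an encoded value before updating the file, you can decode it again. For example, using the sample value shown above:
$ echo -n V2VsY29tZTE= | base64 --decode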
Also change the namespace: and weblogic.domainName: values to match your OAM namespace and domain name:\napiVersion: v1 kind: Secret metadata: name: basic-auth namespace: oamns data: password: V2VsY29tZTE= user: d2VibG9naWM= type: Opaque --- apiVersion: monitoring.coreos.com/v1 kind: ServiceMonitor metadata: name: wls-exporter namespace: oamns labels: k8s-app: wls-exporter release: monitoring spec: namespaceSelector: matchNames: - oamns selector: matchLabels: weblogic.domainName: accessdomain endpoints: - basicAuth: password: name: basic-auth key: password username: name: basic-auth key: user port: default relabelings: - action: labelmap regex: __meta_kubernetes_service_label_(.+) interval: 10s honorLabels: true path: /wls-exporter/metrics Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleSpecific-domain-namespace.yaml and change the namespace to match your OAM namespace. For example:\napiVersion: rbac.authorization.k8s.io/v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: Role metadata: name: prometheus-k8s namespace: oamns rules: - apiGroups: - \u0026quot;\u0026quot; resources: - services - endpoints - pods verbs: - get - list - watch kind: RoleList Update the $WORKDIR/kubernetes/monitoring-service/manifests/prometheus-roleBinding-domain-namespace.yaml and change the namespace` to match your OAM namespace. For example:\napiVersion: rbac.authorization.k8s.io/v1 items: - apiVersion: rbac.authorization.k8s.io/v1 kind: RoleBinding metadata: name: prometheus-k8s namespace: oamns roleRef: apiGroup: rbac.authorization.k8s.io kind: Role name: prometheus-k8s subjects: - kind: ServiceAccount name: prometheus-k8s namespace: monitoring kind: RoleBindingList Run the following command to enable Prometheus:\n$ kubectl apply -f . The output will look similar to the following:\nrolebinding.rbac.authorization.k8s.io/prometheus-k8s created role.rbac.authorization.k8s.io/prometheus-k8s created secret/basic-auth created servicemonitor.monitoring.coreos.com/wls-exporter created Prometheus Service Discovery After the ServiceMonitor is deployed, the wls-exporter should be discovered by Prometheus and be able to collect metrics.\n Access the following URL to view Prometheus service discovery: http://${MASTERNODE-HOSTNAME}:32101/service-discovery\n Click on oamns/wls-exporter/0 and then show more. Verify all the targets are mentioned.\n Grafana Dashboard Access the Grafana dashboard with the following URL: http://${MASTERNODE-HOSTNAME}:32100 and login with admin/admin. Change your password when prompted.\n Import the Grafana dashboard by navigating on the left hand menu to Create \u0026gt; Import. Copy the content from $WORKDIR/kubernetes/monitoring-service/config/weblogic-server-dashboard-import.json and paste. Then click Load and Import. The dashboard should be displayed in the Dashboards panel.\n Cleanup To clean up a manual installation:\n Run the following commands:\n$ cd $WORKDIR/kubernetes/monitoring-service/manifests/ $ kubectl delete -f . 
Delete the deployments:\n$ cd $WORKDIR/kubernetes/monitoring-service/scripts/ $ kubectl cp undeploy-weblogic-monitoring-exporter.py \u0026lt;domain_namespace\u0026gt;/\u0026lt;domain_uid\u0026gt;-adminserver:/u01/oracle/wls-exporter-deploy $ kubectl exec -it -n \u0026lt;domain_namespace\u0026gt; \u0026lt;domain_uid\u0026gt;-adminserver -- /u01/oracle/oracle_common/common/bin/wlst.sh /u01/oracle/wls-exporter-deploy/undeploy-weblogic-monitoring-exporter.py -domainName \u0026lt;domain_uid\u0026gt; -adminServerName AdminServer -adminURL \u0026lt;domain_uid\u0026gt;-adminserver:7001 -username weblogic -password \u0026lt;password\u0026gt; -oamClusterName oam_cluster -wlsMonitoringExporterTooamCluster true -policyClusterName policy_cluster -wlsMonitoringExporterTopolicyCluster true Delete Prometheus:\n$ cd $WORKDIR/kubernetes/monitoring-service/kube-prometheus $ kubectl delete -f manifests $ kubectl delete -f manifests/setup Delete the monitoring namespace:\n$ kubectl delete namespace monitoring " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oam/manage-oam-domains/delete-domain-home/", + "title": "e. Delete the OAM domain home", + "tags": [], + "description": "Learn about the steps to cleanup the OAM domain home.", + "content": "Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script.\n Run the following command to delete the domain:\n$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d \u0026lt;domain_uid\u0026gt; For example:\n$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d accessdomain Drop the RCU schemas as follows:\n$ kubectl exec -it helper -n \u0026lt;domain_namespace\u0026gt; -- /bin/bash [oracle@helper ~]$ [oracle@helper ~]$ export CONNECTION_STRING=\u0026lt;db_host.domain\u0026gt;:\u0026lt;db_port\u0026gt;/\u0026lt;service_name\u0026gt; [oracle@helper ~]$ export RCUPREFIX=\u0026lt;rcu_schema_prefix\u0026gt; /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \\ -dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \\ -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \\ -component WLS -component STB -component OAM -f \u0026lt; /tmp/pwd.txt For example:\n$ kubectl exec -it helper -n oamns -- /bin/bash [oracle@helper ~]$ export CONNECTION_STRING=mydatabasehost.example.com:1521/orcl.example.com [oracle@helper ~]$ export RCUPREFIX=OAMK8S /u01/oracle/oracle_common/bin/rcu -silent -dropRepository -databaseType ORACLE -connectString $CONNECTION_STRING \\ -dbUser sys -dbRole sysdba -selectDependentsForComponents true -schemaPrefix $RCUPREFIX \\ -component MDS -component IAU -component IAU_APPEND -component IAU_VIEWER -component OPSS \\ -component WLS -component STB -component OAM -f \u0026lt; /tmp/pwd.txt Delete the contents of the persistent volume, for example:\n$ rm -rf \u0026lt;persistent_volume\u0026gt;/accessdomainpv/* For example:\n$ rm -rf /scratch/shared/accessdomainpv/* Delete the WebLogic Kubernetes Operator, by running the following command:\n$ helm delete weblogic-kubernetes-operator -n opns Delete the label from the OAM namespace:\n$ kubectl label namespaces \u0026lt;domain_namespace\u0026gt; weblogic-operator- For example:\n$ kubectl label namespaces oamns weblogic-operator- Delete the service account for the operator:\n$ kubectl delete serviceaccount 
\u0026lt;sample-kubernetes-operator-sa\u0026gt; -n \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl delete serviceaccount op-sa -n opns Delete the operator namespace:\n$ kubectl delete namespace \u0026lt;sample-kubernetes-operator-ns\u0026gt; For example:\n$ kubectl delete namespace opns To delete NGINX:\n$ helm delete oam-nginx -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete oam-nginx -n oamns Then run:\n$ helm delete nginx-ingress -n \u0026lt;domain_namespace\u0026gt; For example:\n$ helm delete nginx-ingress -n oamns Delete the OAM namespace:\n$ kubectl delete namespace \u0026lt;domain_namespace\u0026gt; For example:\n$ kubectl delete namespace oamns " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/oid/", + "title": "Oracle Internet Directory", + "tags": [], + "description": "Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management", + "content": "Oracle Internet Directory provides a comprehensive Directory Solution for robust Identity Management. Oracle Internet Directory is an all-in-one directory solution with storage, proxy, synchronization and virtualization capabilities. While unifying the approach, it provides all the services required for high-performance Enterprise and carrier-grade environments. Oracle Internet Directory ensures scalability to billions of entries, ease of installation, elastic deployments, enterprise manageability and effective monitoring.\nThis project supports deployment of Oracle Internet Directory (OID) container images based on the 12c PS4 (12.2.1.4.0) release within a Kubernetes environment. The OID container image refers to binaries for OID Release 12.2.1.4.0.\nThis project has several key features to assist you with deploying and managing Oracle Internet Directory in a Kubernetes environment. You can:\n Create Oracle Internet Directory instances in a Kubernetes persistent volume (PV). This PV can reside in an NFS file system or other Kubernetes volume types. Start servers based on declarative startup parameters and desired states. Expose the Oracle Internet Directory services for external access. Scale Oracle Internet Directory by starting and stopping servers on demand. Monitor the Oracle Internet Directory instance using Prometheus and Grafana. Follow the instructions in this guide to set up Oracle Internet Directory on Kubernetes.\nCurrent production release The current production release for the Oracle Internet Directory 12c PS4 (12.2.1.4.0) deployment on Kubernetes is 22.2.1.\nRecent changes and known issues See the Release Notes for recent changes and known issues for Oracle Internet Directory deployment on Kubernetes.\nGetting started This documentation explains how to configure OID on a Kubernetes cluster where no other Oracle Identity Management products will be deployed. 
For detailed information about this type of deployment, start at Prerequisites and follow this documentation sequentially.\nIf performing an Enterprise Deployment, refer to the Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster instead.\nDocumentation for earlier releases To view documentation for an earlier release, see:\n Version 21.4.1 " +}, +{ + "uri": "/fmw-kubernetes/22.2.1/tags/", + "title": "Tags", + "tags": [], + "description": "", + "content": "" +}] diff --git a/docs/22.2.1/index.xml b/docs/22.2.1/index.xml new file mode 100644 index 000000000..0bdabcc6c --- /dev/null +++ b/docs/22.2.1/index.xml @@ -0,0 +1,461 @@ + + + + Oracle Fusion Middleware on Kubernetes + /fmw-kubernetes/22.2.1/ + Recent content on Oracle Fusion Middleware on Kubernetes + Hugo -- gohugo.io + en-us + Thu, 18 Apr 2019 06:46:23 -0500 + + + + + + a. Using Design Console with NGINX(non-SSL) + /fmw-kubernetes/22.2.1/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/configure-design-console/using-the-design-console-with-nginx-non-ssl/ + Configure an NGINX ingress (non-SSL) to allow Design Console to connect to your Kubernetes cluster. + Prerequisites + Setup routing rules for the Design Console ingress + Create the ingress + Update the T3 channel + Restart the OIG Managed Server + Design Console client +a. Using an on-premises installed Design Console +b. Using a container image for Design Console + Login to the Design Console + + + + Domain life cycle + /fmw-kubernetes/22.2.1/oig/manage-oig-domains/domain-lifecycle/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/manage-oig-domains/domain-lifecycle/ + View existing OIG servers Starting/Scaling up OIG Managed servers Stopping/Scaling down OIG Managed servers Stopping and starting the Administration Server and Managed Servers Domain lifecycle sample scripts As OIG domains use the WebLogic Kubernetes Operator, domain lifecycle operations are managed using the WebLogic Kubernetes Operator itself. +This document shows the basic operations for starting, stopping and scaling servers in the OIG domain. +For more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation. + + + + Release Notes + /fmw-kubernetes/22.2.1/oam/release-notes/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oam/release-notes/ + Review the latest changes and known issues for Oracle Access Management on Kubernetes. +Recent changes Date Version Change April, 2022 22.2.1 Updated for CRI-O support. November, 2021 21.4.2 Supports Oracle Access Management domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. + + + + Release Notes + /fmw-kubernetes/22.2.1/oid/release-notes/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oid/release-notes/ + Review the latest changes and known issues for Oracle Internet Directory on Kubernetes. +Recent changes Date Version Change April, 2022 22.2.1 Updated for CRI-O support. October, 2021 21.4.1 Initial release of Oracle Internet Directory on Kubernetes. 
+ + + + Release Notes + /fmw-kubernetes/22.2.1/oig/release-notes/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/release-notes/ + Review the latest changes and known issues for Oracle Identity Governance on Kubernetes. +Recent changes Date Version Change April, 2022 22.2.1 Updated for CRI-O support. November, 2021 21.4.2 Supports Oracle Identity Governance domain deployment using WebLogic Kubernetes Operator 3.3.0. Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. + + + + Release Notes + /fmw-kubernetes/22.2.1/oud/release-notes/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oud/release-notes/ + Review the latest changes and known issues for Oracle Unified Directory on Kubernetes. +Recent changes Date Version Change April, 2022 22.2.1 Updated for CRI-O support. November 2021 21.4.2 Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. + + + + Release Notes + /fmw-kubernetes/22.2.1/oudsm/release-notes/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oudsm/release-notes/ + Review the latest changes and known issues for Oracle Unified Directory Services Manager on Kubernetes. +Recent changes Date Version Change April, 2022 22.2.1 Updated for CRI-O support. November 2021 21.4.2 Voyager ingress removed as no longer supported. October 2021 21.4.1 A) References to supported Kubernetes, Helm and Docker versions removed and replaced with Support note reference. B) Namespace and domain names changed to be consistent with Enterprise Deployment Guide for Oracle Identity and Access Management in a Kubernetes Cluster. + + + + b. Using Design Console with NGINX(SSL) + /fmw-kubernetes/22.2.1/oig/configure-design-console/using-the-design-console-with-nginx-ssl/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/configure-design-console/using-the-design-console-with-nginx-ssl/ + Configure an NGINX ingress (SSL) to allow Design Console to connect to your Kubernetes cluster. + Prerequisites + Setup routing rules for the Design Console ingress + Create the ingress + Update the T3 channel + Restart the OIG Managed Server + Design Console client +a. Using an on-premises installed Design Console +b. Using a container image for Design Console + Login to the Design Console + + + + WLST administration operations + /fmw-kubernetes/22.2.1/oig/manage-oig-domains/wlst-admin-operations/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/manage-oig-domains/wlst-admin-operations/ + Invoke WLST and access Administration Server To use WLST to administer the OIG domain, use a helper pod in the same Kubernetes cluster as the OIG Domain. + Check to see if the helper pod exists by running: +$ kubectl get pods -n <domain_namespace> | grep helper For example: +$ kubectl get pods -n oigns | grep helper The output should look similar to the following: +helper 1/1 Running 0 26h If the helper pod doesn’t exist then see Step 1 in Prepare your environment to create it. 
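Once the helper pod is confirmed to be running, a typical next step is to start WLST inside it and connect to the Administration Server. The following is a minimal sketch only; it assumes the helper pod was created from the OIG container image as described in Prepare your environment, and uses the oigns namespace, the governancedomain domain UID and the default administration port 7001 seen elsewhere in this guide, so substitute your own values and password:
$ kubectl exec -it helper -n oigns -- /u01/oracle/oracle_common/common/bin/wlst.sh
wls:/offline> connect('weblogic','<password>','t3://governancedomain-adminserver:7001')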
+ + + + Running OIG utilities + /fmw-kubernetes/22.2.1/oig/manage-oig-domains/running-oig-utilities/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/manage-oig-domains/running-oig-utilities/ + Run OIG utilities inside the OIG Kubernetes cluster. +Run utilities in an interactive bash shell Access a bash shell inside the <domain_uid>-oim-server1 pod: +$ kubectl -n oigns exec -it <domain_uid>-oim-server1 -- bash For example: +$ kubectl -n oigns exec -it governancedomain-oim-server1 -- bash This will take you into a bash shell in the running <domain_uid>-oim-server1 pod: +[oracle@governancedomain-oim-server1 oracle]$ Navigate to the /u01/oracle/idm/server/bin directory and execute the utility as required. + + + + Logging and visualization + /fmw-kubernetes/22.2.1/oig/manage-oig-domains/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/manage-oig-domains/logging-and-visualization/ + After the OIG domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. +Install Elasticsearch and Kibana If your domain namespace is anything other than oigns, edit the $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml and change all instances of oigns to your domain namespace. + Create a Kubernetes secret to access the Elasticsearch and Kibana container images: +Note: You must first have a user account on hub. + + + + Monitoring an OIG domain + /fmw-kubernetes/22.2.1/oig/manage-oig-domains/monitoring-oim-domains/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/manage-oig-domains/monitoring-oim-domains/ + After the OIG domain is set up you can monitor the OIG instance using Prometheus and Grafana. See Monitoring a domain. +The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics. +There are two ways to set up monitoring and you should choose one method or the other: 
+You can update the deployment with a new OID container image using one of the following methods: + Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory: + + + + Patch and Upgrade + /fmw-kubernetes/22.2.1/oud/patch-and-upgrade/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oud/patch-and-upgrade/ + Introduction In this section the Oracle Unified Directory (OUD) deployment is updated with a new OUD container image. +Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster. +You can update the deployment with a new OUD container image using one of the following methods: + Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory: + + + + Patch and Upgrade + /fmw-kubernetes/22.2.1/oudsm/patch-and-upgrade/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oudsm/patch-and-upgrade/ + Introduction In this section the Oracle Unified Directory Services Manager (OUDSM) deployment is updated with a new OUDSM container image. +Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster. +You can update the deployment with a new OUDSM container image using one of the following methods: + Using a YAML file Using --set argument Using a YAML file Navigate to the $WORKDIR/kubernetes/helm directory: + + + + a) Scaling Up/Down OID Pods + /fmw-kubernetes/22.2.1/oid/manage-oid-containers/scaling-up-down/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oid/manage-oid-containers/scaling-up-down/ + Introduction This section describes how to increase or decrease the number of OID pods in the Kubernetes deployment. +View existing OID pods By default the oid helm chart deployment starts two pods: oidhost1 and oidhost2. +The number of pods started is determined by the replicaCount, which is set to 1 by default. A value of 1 starts the two pods above. +To scale up or down the number of OID pods, set replicaCount accordingly. + + + + a) Scaling Up/Down OUD Pods + /fmw-kubernetes/22.2.1/oud/manage-oud-containers/scaling-up-down/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oud/manage-oud-containers/scaling-up-down/ + Introduction This section describes how to increase or decrease the number of OUD pods in the Kubernetes deployment. +View existing OUD pods By default the oud-ds-rs helm chart deployment starts three pods: oud-ds-rs-0 and two replica pods oud-ds-rs-1 and oud-ds-rs-2. +The number of pods started is determined by the replicaCount, which is set to 2 by default. A value of 2 starts the three pods above. +To scale up or down the number of OUD pods, set replicaCount accordingly. + + + + a) Scaling Up/Down OUDSM Pods + /fmw-kubernetes/22.2.1/oudsm/manage-oudsm-containers/scaling-up-down/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oudsm/manage-oudsm-containers/scaling-up-down/ + Introduction This section describes how to increase or decrease the number of OUDSM pods in the Kubernetes deployment. +View existing OUDSM pods By default the oudsm helm chart deployment starts one pod: oudsm-1. +The number of pods started is determined by the replicaCount, which is set to 1 by default. A value of 1 starts the pod above. +To scale up or down the number of OUDSM pods, set replicaCount accordingly. + + + + a. 
Domain Life Cycle + /fmw-kubernetes/22.2.1/oam/manage-oam-domains/domain-lifecycle/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oam/manage-oam-domains/domain-lifecycle/ + View existing OAM servers Starting/Scaling up OAM Managed servers Stopping/Scaling down OAM Managed servers Stopping and starting the Administration Server and Managed Servers Domain lifecycle sample scripts As OAM domains use the WebLogic Kubernetes Operator, domain lifecycle operations are managed using the WebLogic Kubernetes Operator itself. +This document shows the basic operations for starting, stopping and scaling servers in the OAM domain. +For more detailed information refer to Domain Life Cycle in the WebLogic Kubernetes Operator documentation. + + + + a. Patch an image + /fmw-kubernetes/22.2.1/oam/patch-and-upgrade/patch_an_image/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oam/patch-and-upgrade/patch_an_image/ + Choose one of the following options to update your OAM Kubernetes cluster to use the new image: + Run the kubectl edit domain command Run the kubectl patch domain command In all of the above cases, the WebLogic Kubernetes Operator will restart the Administration Server pod first and then perform a rolling restart on the OAM Managed Servers. +Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster. + + + + a. Patch an image + /fmw-kubernetes/22.2.1/oig/patch-and-upgrade/patch_an_image/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/patch-and-upgrade/patch_an_image/ + Choose one of the following options to update your OIG Kubernetes cluster to use the new image: + Run the kubectl edit domain command Run the kubectl patch domain command In all of the above cases, the WebLogic Kubernetes Operator will restart the Administration Server pod first and then perform a rolling restart on the OIG Managed Servers. +Note: If you are not using Oracle Container Registry or your own container registry, then you must first load the new container image on all nodes in your Kubernetes cluster. + + + + a. Post Install Tasks + /fmw-kubernetes/22.2.1/oig/post-install-config/set_oimfronendurl_using_mbeans/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/post-install-config/set_oimfronendurl_using_mbeans/ + Follow these post install configuration steps. + Create a Server Overrides File Set OIMFrontendURL using MBeans Create a Server Overrides File Navigate to the following directory: +cd $WORKDIR/kubernetes/create-oim-domain/domain-home-on-pv/output/weblogic-domains/governancedomain Create a setUserOverrides.sh file with the following contents: +DERBY_FLAG=false JAVA_OPTIONS="${JAVA_OPTIONS} -Djava.net.preferIPv4Stack=true" MEM_ARGS="-Xms8192m -Xmx8192m" Copy the setUserOverrides.sh file to the Administration Server pod: +$ chmod 755 setUserOverrides.sh $ kubectl cp setUserOverrides.sh oigns/governancedomain-adminserver:/u01/oracle/user_projects/domains/governancedomain/bin/setUserOverrides.sh Where oigns is the OIG namespace and governancedomain is the domain_UID. + + + + a. 
Using an Ingress with NGINX (non-SSL) + /fmw-kubernetes/22.2.1/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s/ + Setting up an ingress for NGINX for the OIG domain on Kubernetes (non-SSL) The instructions below explain how to set up NGINX as an ingress for the OIG domain with non-SSL termination. +Note: All the steps below should be performed on the master node. + Install NGINX +a. Configure the repository +b. Create a namespace +c. Install NGINX using helm +d. Setup routing rules for the domain + Create an ingress for the domain + + + + b) Logging and Visualization for Helm Chart oud-ds-rs Deployment + /fmw-kubernetes/22.2.1/oud/manage-oud-containers/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oud/manage-oud-containers/logging-and-visualization/ + Introduction Installation Create a Kubernetes secret Enable Elasticsearch, Logstash, and Kibana Upgrade OUD deployment with ELK configuration Verify the pods Verify using the Kibana application Introduction This section describes how to install and configure logging and visualization for the oud-ds-rs Helm chart deployment. +The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK you can gain insights in real-time from the log data from your applications. + + + + b) Logging and Visualization for Helm Chart oudsm Deployment + /fmw-kubernetes/22.2.1/oudsm/manage-oudsm-containers/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oudsm/manage-oudsm-containers/logging-and-visualization/ + Introduction Installation Create a Kubernetes secret Enable Elasticsearch, Logstash, and Kibana Upgrade OUDSM deployment with ELK configuration Verify the pods Verify using the Kibana application Introduction This section describes how to install and configure logging and visualization for the oudsm Helm chart deployment. +The ELK stack consists of Elasticsearch, Logstash, and Kibana. Using ELK you can gain insights in real-time from the log data from your applications. + + + + b. Install and configure connectors + /fmw-kubernetes/22.2.1/oig/post-install-config/install_and_configure_connectors/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/post-install-config/install_and_configure_connectors/ + Download the connector Download the Connector you are interested in from Oracle Identity Manager Connector Downloads. + Copy the connector zip file to a staging directory on the master node, e.g. <workdir>/stage, and unzip it: +$ cp $HOME/Downloads/<connector>.zip <workdir>/<stage>/ $ cd <workdir>/<stage> $ unzip <connector>.zip $ chmod -R 755 * For example: +$ cp $HOME/Downloads/Exchange-12.2.1.3.0.zip /scratch/OIGK8S/stage/ $ cd /scratch/OIGK8S/stage/ $ unzip exchange-12.2.1.3.0.zip $ chmod -R 755 * Copy OIG connectors There are two options to copy OIG Connectors to your Kubernetes cluster: + + + + b. Upgrade an operator release + /fmw-kubernetes/22.2.1/oam/patch-and-upgrade/upgrade_an_operator_release/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oam/patch-and-upgrade/upgrade_an_operator_release/ + These instructions apply to upgrading the operator within the 3.x release family as additional versions are released. 
+ On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project: +$ mkdir <workdir>/weblogic-kubernetes-operator-3.X.X $ cd <workdir>/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X For example: +$ mkdir /scratch/OAMK8S/weblogic-kubernetes-operator-3.X.X $ cd /scratch/OAMK8S/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X This will create the directory <workdir>/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator + Run the following helm command to upgrade the operator: + + + + b. Upgrade an operator release + /fmw-kubernetes/22.2.1/oig/patch-and-upgrade/upgrade_an_operator_release/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/patch-and-upgrade/upgrade_an_operator_release/ + These instructions apply to upgrading operators within the 3.x release family as additional versions are released. + On the master node, download the new WebLogic Kubernetes Operator source code from the operator github project: +$ mkdir <workdir>/weblogic-kubernetes-operator-3.X.X $ cd <workdir>/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X For example: +$ mkdir /scratch/OIGK8S/weblogic-kubernetes-operator-3.X.X $ cd /scratch/OIGK8S/weblogic-kubernetes-operator-3.X.X $ git clone https://github.com/oracle/weblogic-kubernetes-operator.git --branch v3.X.X This will create the directory <workdir>/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator + Run the following helm command to upgrade the operator: + + + + b. Using an Ingress with NGINX (SSL) + /fmw-kubernetes/22.2.1/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oig/configure-ingress/ingress-nginx-setup-for-oig-domain-setup-on-k8s-ssl/ + Setting up an ingress for NGINX for the OIG domain on Kubernetes The instructions below explain how to set up NGINX as an ingress for the OIG domain with SSL termination. +Note: All the steps below should be performed on the master node. + Create a SSL certificate +a. Generate SSL certificate +b. Create a Kubernetes secret for SSL + Install NGINX +a. Configure the repository +b. Create a namespace + + + + b. WLST Administration Operations + /fmw-kubernetes/22.2.1/oam/manage-oam-domains/wlst-admin-operations/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oam/manage-oam-domains/wlst-admin-operations/ + To use WLST to administer the OAM domain, use the helper pod in the same Kubernetes cluster as the OAM Domain. + Check to see if the helper pod exists by running: +$ kubectl get pods -n <domain_namespace> | grep helper For example: +$ kubectl get pods -n oamns | grep helper The output should look similar to the following: +helper 1/1 Running 0 26h If the helper pod doesn’t exist then see Step 1 in Prepare your environment to create it. 
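The operator upgrade entries above stop short of showing the helm command that follows the git clone step. The following is a rough, hypothetical sketch only; the release name, the opns namespace, the chart path inside the cloned source and the image tag are assumptions and must match what was used when the operator was installed:
$ cd /scratch/OAMK8S/weblogic-kubernetes-operator-3.X.X/weblogic-kubernetes-operator
$ helm upgrade --reuse-values --namespace opns --wait \
  --set image=ghcr.io/oracle/weblogic-kubernetes-operator:3.X.X \
  weblogic-kubernetes-operator kubernetes/charts/weblogic-operator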
+ + + + c) Monitoring an Oracle Internet Directory Instance + /fmw-kubernetes/22.2.1/oid/manage-oid-containers/monitoring-oid-instance/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oid/manage-oid-containers/monitoring-oid-instance/ + Introduction Install Prometheus and Grafana Create a Kubernetes namespace Add Prometheus and Grafana Helm repositories Install the Prometheus operator View Prometheus and Grafana objects created Add the NodePort Verify using Grafana GUI Introduction After the Oracle Internet Directory instance (OID) is set up you can monitor it using Prometheus and Grafana. +Install Prometheus and Grafana Create a Kubernetes namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. + + + + c) Monitoring an Oracle Unified Directory Instance + /fmw-kubernetes/22.2.1/oud/manage-oud-containers/monitoring-oud-instance/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oud/manage-oud-containers/monitoring-oud-instance/ + Introduction Install Prometheus and Grafana Create a Kubernetes namespace Add Prometheus and Grafana Helm repositories Install the Prometheus operator View Prometheus and Grafana objects created Add the NodePort Verify using Grafana GUI Introduction After the Oracle Unified Directory instance (OUD) is set up you can monitor it using Prometheus and Grafana. +Install Prometheus and Grafana Create a Kubernetes namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. + + + + c) Monitoring an Oracle Unified Directory Services Manager Instance + /fmw-kubernetes/22.2.1/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oudsm/manage-oudsm-containers/monitoring-oudsm-instance/ + Introduction Install Prometheus and Grafana Create a Kubernetes namespace Add Prometheus and Grafana Helm repositories Install the Prometheus operator View Prometheus and Grafana Objects Created Add the NodePort Verify Using Grafana GUI Introduction After the Oracle Unified Directory Services Manager instance is set up you can monitor it using Prometheus and Grafana. +Install Prometheus and Grafana Create a Kubernetes namespace Create a Kubernetes namespace to provide a scope for Prometheus and Grafana objects such as pods and services that you create in the environment. + + + + c. Logging and Visualization + /fmw-kubernetes/22.2.1/oam/manage-oam-domains/logging-and-visualization/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oam/manage-oam-domains/logging-and-visualization/ + After the OAM domain is set up you can publish operator and WebLogic Server logs into Elasticsearch and interact with them in Kibana. +Install Elasticsearch and Kibana If your domain namespace is anything other than oamns, edit the $WORKDIR/kubernetes/elasticsearch-and-kibana/elasticsearch_and_kibana.yaml and change all instances of oamns to your domain namespace. + Create a Kubernetes secret to access the Elasticsearch and Kibana container images: +Note: You must first have a user account on hub. + + + + d. Monitoring an OAM domain + /fmw-kubernetes/22.2.1/oam/manage-oam-domains/monitoring-oam-domains/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oam/manage-oam-domains/monitoring-oam-domains/ + After the OAM domain is set up you can monitor the OAM instance using Prometheus and Grafana. See Monitoring a domain. 
+The WebLogic Monitoring Exporter uses the WLS RESTful Management API to scrape runtime information and then exports Prometheus-compatible metrics. It is deployed as a web application in a WebLogic Server (WLS) instance, version 12.2.1 or later, typically, in the instance from which you want to get metrics. +There are two ways to setup monitoring and you should choose one method or the other: + + + + e. Delete the OAM domain home + /fmw-kubernetes/22.2.1/oam/manage-oam-domains/delete-domain-home/ + Mon, 01 Jan 0001 00:00:00 +0000 + + /fmw-kubernetes/22.2.1/oam/manage-oam-domains/delete-domain-home/ + Sometimes in production, but most likely in testing environments, you might want to remove the domain home that is generated using the create-domain.sh script. + Run the following command to delete the domain: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d <domain_uid> For example: +$ cd $WORKDIR/kubernetes/delete-domain $ ./delete-weblogic-domain-resources.sh -d accessdomain Drop the RCU schemas as follows: +$ kubectl exec -it helper -n <domain_namespace> -- /bin/bash [oracle@helper ~]$ [oracle@helper ~]$ export CONNECTION_STRING=<db_host. + + + + \ No newline at end of file diff --git a/docs/22.2.1/js/auto-complete.js b/docs/22.2.1/js/auto-complete.js new file mode 100644 index 000000000..7fbde995e --- /dev/null +++ b/docs/22.2.1/js/auto-complete.js @@ -0,0 +1,223 @@ +/* + JavaScript autoComplete v1.0.4 + Copyright (c) 2014 Simon Steinberger / Pixabay + GitHub: https://github.com/Pixabay/JavaScript-autoComplete + License: http://www.opensource.org/licenses/mit-license.php +*/ + +var autoComplete = (function(){ + // "use strict"; + function autoComplete(options){ + if (!document.querySelector) return; + + // helpers + function hasClass(el, className){ return el.classList ? el.classList.contains(className) : new RegExp('\\b'+ className+'\\b').test(el.className); } + + function addEvent(el, type, handler){ + if (el.attachEvent) el.attachEvent('on'+type, handler); else el.addEventListener(type, handler); + } + function removeEvent(el, type, handler){ + // if (el.removeEventListener) not working in IE11 + if (el.detachEvent) el.detachEvent('on'+type, handler); else el.removeEventListener(type, handler); + } + function live(elClass, event, cb, context){ + addEvent(context || document, event, function(e){ + var found, el = e.target || e.srcElement; + while (el && !(found = hasClass(el, elClass))) el = el.parentElement; + if (found) cb.call(el, e); + }); + } + + var o = { + selector: 0, + source: 0, + minChars: 3, + delay: 150, + offsetLeft: 0, + offsetTop: 1, + cache: 1, + menuClass: '', + renderItem: function (item, search){ + // escape special characters + search = search.replace(/[-\/\\^$*+?.()|[\]{}]/g, '\\$&'); + var re = new RegExp("(" + search.split(' ').join('|') + ")", "gi"); + return '
' + item.replace(re, "$1") + '
'; + }, + onSelect: function(e, term, item){} + }; + for (var k in options) { if (options.hasOwnProperty(k)) o[k] = options[k]; } + + // init + var elems = typeof o.selector == 'object' ? [o.selector] : document.querySelectorAll(o.selector); + for (var i=0; i 0) + that.sc.scrollTop = selTop + that.sc.suggestionHeight + scrTop - that.sc.maxHeight; + else if (selTop < 0) + that.sc.scrollTop = selTop + scrTop; + } + } + } + addEvent(window, 'resize', that.updateSC); + document.body.appendChild(that.sc); + + live('autocomplete-suggestion', 'mouseleave', function(e){ + var sel = that.sc.querySelector('.autocomplete-suggestion.selected'); + if (sel) setTimeout(function(){ sel.className = sel.className.replace('selected', ''); }, 20); + }, that.sc); + + live('autocomplete-suggestion', 'mouseover', function(e){ + var sel = that.sc.querySelector('.autocomplete-suggestion.selected'); + if (sel) sel.className = sel.className.replace('selected', ''); + this.className += ' selected'; + }, that.sc); + + live('autocomplete-suggestion', 'mousedown', function(e){ + if (hasClass(this, 'autocomplete-suggestion')) { // else outside click + var v = this.getAttribute('data-val'); + that.value = v; + o.onSelect(e, v, this); + that.sc.style.display = 'none'; + } + }, that.sc); + + that.blurHandler = function(){ + try { var over_sb = document.querySelector('.autocomplete-suggestions:hover'); } catch(e){ var over_sb = 0; } + if (!over_sb) { + that.last_val = that.value; + that.sc.style.display = 'none'; + setTimeout(function(){ that.sc.style.display = 'none'; }, 350); // hide suggestions on fast input + } else if (that !== document.activeElement) setTimeout(function(){ that.focus(); }, 20); + }; + addEvent(that, 'blur', that.blurHandler); + + var suggest = function(data){ + var val = that.value; + that.cache[val] = data; + if (data.length && val.length >= o.minChars) { + var s = ''; + for (var i=0;i 40) && key != 13 && key != 27) { + var val = that.value; + if (val.length >= o.minChars) { + if (val != that.last_val) { + that.last_val = val; + clearTimeout(that.timer); + if (o.cache) { + if (val in that.cache) { suggest(that.cache[val]); return; } + // no requests if previous suggestions were empty + for (var i=1; i https://github.com/noelboss/featherlight/issues/317 +!function(u){"use strict";if(void 0!==u)if(u.fn.jquery.match(/-ajax/))"console"in window&&window.console.info("Featherlight needs regular jQuery, not the slim version.");else{var r=[],i=function(t){return r=u.grep(r,function(e){return e!==t&&0','
','",'
'+n.loading+"
","
",""].join("")),o="."+n.namespace+"-close"+(n.otherClose?","+n.otherClose:"");return n.$instance=i.clone().addClass(n.variant),n.$instance.on(n.closeTrigger+"."+n.namespace,function(e){if(!e.isDefaultPrevented()){var t=u(e.target);("background"===n.closeOnClick&&t.is("."+n.namespace)||"anywhere"===n.closeOnClick||t.closest(o).length)&&(n.close(e),e.preventDefault())}}),this},getContent:function(){if(!1!==this.persist&&this.$content)return this.$content;var t=this,e=this.constructor.contentFilters,n=function(e){return t.$currentTarget&&t.$currentTarget.attr(e)},r=n(t.targetAttr),i=t.target||r||"",o=e[t.type];if(!o&&i in e&&(o=e[i],i=t.target&&r),i=i||n("href")||"",!o)for(var a in e)t[a]&&(o=e[a],i=t[a]);if(!o){var s=i;if(i=null,u.each(t.contentFilters,function(){return(o=e[this]).test&&(i=o.test(s)),!i&&o.regex&&s.match&&s.match(o.regex)&&(i=s),!i}),!i)return"console"in window&&window.console.error("Featherlight: no content filter found "+(s?' for "'+s+'"':" (no target specified)")),!1}return o.process.call(t,i)},setContent:function(e){return this.$instance.removeClass(this.namespace+"-loading"),this.$instance.toggleClass(this.namespace+"-iframe",e.is("iframe")),this.$instance.find("."+this.namespace+"-inner").not(e).slice(1).remove().end().replaceWith(u.contains(this.$instance[0],e[0])?"":e),this.$content=e.addClass(this.namespace+"-inner"),this},open:function(t){var n=this;if(n.$instance.hide().appendTo(n.root),!(t&&t.isDefaultPrevented()||!1===n.beforeOpen(t))){t&&t.preventDefault();var e=n.getContent();if(e)return r.push(n),s(!0),n.$instance.fadeIn(n.openSpeed),n.beforeContent(t),u.when(e).always(function(e){n.setContent(e),n.afterContent(t)}).then(n.$instance.promise()).done(function(){n.afterOpen(t)})}return n.$instance.detach(),u.Deferred().reject().promise()},close:function(e){var t=this,n=u.Deferred();return!1===t.beforeClose(e)?n.reject():(0===i(t).length&&s(!1),t.$instance.fadeOut(t.closeSpeed,function(){t.$instance.detach(),t.afterClose(e),n.resolve()})),n.promise()},resize:function(e,t){if(e&&t&&(this.$content.css("width","").css("height",""),this.$content.parent().width()');return n.onload=function(){r.naturalWidth=n.width,r.naturalHeight=n.height,t.resolve(r)},n.onerror=function(){t.reject(r)},n.src=e,t.promise()}},html:{regex:/^\s*<[\w!][^<]*>/,process:function(e){return u(e)}},ajax:{regex:/./,process:function(e){var n=u.Deferred(),r=u("
").load(e,function(e,t){"error"!==t&&n.resolve(r.contents()),n.fail()});return n.promise()}},iframe:{process:function(e){var t=new u.Deferred,n=u("