...

State: Waiting
Reason: PodInitializing
Ready: False
Restart Count: 0
Environment:
APP_FQI: policy@policy.onap.org
aaf_locate_url: https://aaf-locate.onap:8095
aaf_locator_container: oom
aaf_locator_container_ns: onap
aaf_locator_fqdn: policy
aaf_locator_app_ns: org.osaaf.aaf
DEPLOY_FQI: <set to the key 'login' in secret 'dev-policy-api-cert-initializer-deployer-creds'> Optional: false
DEPLOY_PASSWORD: <set to the key 'password' in secret 'dev-policy-api-cert-initializer-deployer-creds'> Optional: false
cadi_longitude: 0.0
cadi_latitude: 0.0
aaf_locator_public_fqdn: policy.onap.org
Mounts:
/opt/app/aaf_config/bin/aaf-add-config.sh from aaf-add-config (rw,path="aaf-add-config.sh")
/opt/app/aaf_config/bin/retrieval_check.sh from aaf-add-config (rw,path="retrieval_check.sh")
/opt/app/aaf_config/cert/truststoreONAP.p12.b64 from aaf-agent-certs (rw,path="truststoreONAP.p12.b64")
/opt/app/aaf_config/cert/truststoreONAPall.jks.b64 from aaf-agent-certs (rw,path="truststoreONAPall.jks.b64")
/opt/app/osaaf from dev-policy-api-aaf-config (rw)
/var/run/secrets/kubernetes.io/serviceaccount from dev-policy-api-read-token-jxvtg (ro)
Containers:
policy-api:
Container ID:
Image: nexus3.onap.org:10001/onap/policy-api:2.5-SNAPSHOT-latest
Image ID:
Port: 6969/TCP
Host Port: 0/TCP
Command:
sh
-c
Args:
source /opt/app/osaaf/local/.ci;/opt/app/policy/api/bin/policy-api.sh /opt/app/policy/api/etc/mounted/config.json
State: Waiting
Reason: PodInitializing
Ready: False
Restart Count: 0
Liveness: tcp-socket :6969 delay=20s timeout=1s period=10s #success=1 #failure=3
Readiness: tcp-socket :6969 delay=20s timeout=1s period=10s #success=1 #failure=3
Environment: <none>
Mounts:
/etc/localtime from localtime (ro)
/opt/app/osaaf from dev-policy-api-aaf-config (rw)
/opt/app/policy/api/etc/mounted from apiconfig-processed (rw)
/var/run/secrets/kubernetes.io/serviceaccount from dev-policy-api-read-token-jxvtg (ro)
Conditions:
Type Status
Initialized False
Ready False
ContainersReady False
PodScheduled True
Volumes:
dev-policy-api-aaf-config:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium: Memory
SizeLimit: <unset>
aaf-agent-certs:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: dev-cert-wrapper-certs
Optional: false
aaf-add-config:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: dev-policy-api-cert-initializer-add-config
Optional: false
localtime:
Type: HostPath (bare host directory volume)
Path: /etc/localtime
HostPathType:
apiconfig:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: dev-policy-api-configmap
Optional: false
apiconfig-processed:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium: Memory
SizeLimit: <unset>
dev-policy-api-read-token-jxvtg:
Type: Secret (a volume populated by a Secret)
SecretName: dev-policy-api-read-token-jxvtg
Optional: false
QoS Class: Burstable
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 26m default-scheduler Successfully assigned onap/dev-policy-api-78976fbd5d-pbk4h to esy-master-policy-002-k8s-02
Normal Pulled 26m kubelet Container image "nexus3.onap.org:10001/onap/oom/readiness:3.0.1" already present on machine
Normal Created 26m kubelet Created container policy-api-readiness
Normal Started 26m kubelet Started container policy-api-readiness
Normal Pulled 24m kubelet Container image "docker.io/dibi/envsubst:1" already present on machine
Normal Created 24m kubelet Created container policy-api-update-config
Normal Started 24m kubelet Started container policy-api-update-config
Normal Created 21m (x4 over 24m) kubelet Created container policy-api-cert-initializer-readiness
Normal Started 21m (x4 over 24m) kubelet Started container policy-api-cert-initializer-readiness
Normal Pulled 19m (x5 over 24m) kubelet Container image "nexus3.onap.org:10001/onap/oom/readiness:3.0.1" already present on machine
Warning BackOff 65s (x77 over 22m) kubelet Back-off restarting failed container


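The policy-api pod above never leaves PodInitializing: its aaf-config init container is still Waiting while the readiness init container (onap/oom/readiness:3.0.1) is repeatedly recreated, which is what produces the "Back-off restarting failed container" event. A minimal triage sketch, using only the namespace, pod name and container names visible in the output above (--previous prints the log of the last terminated attempt):

# log of the last failed attempt of the readiness init container (name taken from the Created/Started events above)
kubectl logs dev-policy-api-78976fbd5d-pbk4h -n onap -c policy-api-cert-initializer-readiness --previous
# full event history for this pod
kubectl get events -n onap --field-selector involvedObject.name=dev-policy-api-78976fbd5d-pbk4h
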
root@esy-master-policy-002-nfs:~/oom/kubernetes# kubectl describe pod dev-policy-apex-pdp-0
Name: dev-policy-apex-pdp-0
Namespace: onap
Priority: 0
Node: esy-master-policy-002-k8s-02/172.16.1.104
Start Time: Thu, 26 Aug 2021 12:45:31 +0000
Labels: app=policy-apex-pdp
controller-revision-hash=dev-policy-apex-pdp-566cf5665
release=dev
statefulset.kubernetes.io/pod-name=dev-policy-apex-pdp-0
Annotations: cni.projectcalico.org/podIP: 10.42.2.109/32
cni.projectcalico.org/podIPs: 10.42.2.109/32
Status: Pending
IP: 10.42.2.109
IPs:
IP: 10.42.2.109
Controlled By: StatefulSet/dev-policy-apex-pdp
Init Containers:
policy-apex-pdp-update-config:
Container ID: docker://408ca808d6166e320989015311c8de82b6418828b1b05e1bced0c3a05cd7bdd5
Image: docker.io/dibi/envsubst:1
Image ID: docker-pullable://dibi/envsubst@sha256:6f1938ec2114e98406b9dcc638929fcbe68add3d94eb8629e0cbed03b72f09f8
Port: <none>
Host Port: <none>
Command:
sh
Args:
-c
cd /config-input && for PFILE in `ls -1`; do envsubst <${PFILE} >/config/${PFILE}; done
State: Terminated
Reason: Completed
Exit Code: 0
Started: Thu, 26 Aug 2021 12:45:41 +0000
Finished: Thu, 26 Aug 2021 12:45:41 +0000
Ready: True
Restart Count: 0
Environment:
TRUSTSTORE_PASSWORD: <set to the key 'password' in secret 'dev-policy-apex-pdp-truststore-pass'> Optional: false
KEYSTORE_PASSWORD: <set to the key 'password' in secret 'dev-policy-apex-pdp-keystore-pass'> Optional: false
RESTSERVER_USER: <set to the key 'login' in secret 'dev-policy-apex-pdp-restserver-creds'> Optional: false
RESTSERVER_PASSWORD: <set to the key 'password' in secret 'dev-policy-apex-pdp-restserver-creds'> Optional: false
Mounts:
/config from apexconfig (rw)
/config-input from apexconfig-input (rw)
/var/run/secrets/kubernetes.io/serviceaccount from dev-policy-apex-pdp-read-token-x829k (ro)
policy-apex-pdp-cert-initializer-readiness:
Container ID: docker://f8c75fa194e74bb6dc615d0df4cd88bb1b6ddbc488105048ec0e681d8857257e
Image: nexus3.onap.org:10001/onap/oom/readiness:3.0.1
Image ID: docker-pullable://nexus3.onap.org:10001/onap/oom/readiness@sha256:317c8a361ae73750f4d4a1b682c42b73de39083f73228dede31fd68b16c089db
Port: <none>
Host Port: <none>
Command:
/app/ready.py
Args:
--container-name
aaf-locate
--container-name
aaf-cm
--container-name
aaf-service
State: Waiting
Reason: CrashLoopBackOff
Last State: Terminated
Reason: OOMKilled
Exit Code: 137
Started: Thu, 26 Aug 2021 13:19:09 +0000
Finished: Thu, 26 Aug 2021 13:19:50 +0000
Ready: False
Restart Count: 10
Limits:
cpu: 100m
memory: 100Mi
Requests:
cpu: 3m
memory: 20Mi
Environment:
NAMESPACE: onap (v1:metadata.namespace)
Mounts:
/var/run/secrets/kubernetes.io/serviceaccount from dev-policy-apex-pdp-read-token-x829k (ro)
policy-apex-pdp-aaf-config:
Container ID:
Image: nexus3.onap.org:10001/onap/aaf/aaf_agent:2.1.20
Image ID:
Port: <none>
Host Port: <none>
Command:
sh
-c
/opt/app/aaf_config/bin/agent.sh
. /opt/app/aaf_config/bin/retrieval_check.sh
/opt/app/aaf_config/bin/aaf-add-config.sh

State: Waiting
Reason: PodInitializing
Ready: False
Restart Count: 0
Environment:
APP_FQI: policy@policy.onap.org
aaf_locate_url: https://aaf-locate.onap:8095
aaf_locator_container: oom
aaf_locator_container_ns: onap
aaf_locator_fqdn: policy
aaf_locator_app_ns: org.osaaf.aaf
DEPLOY_FQI: <set to the key 'login' in secret 'dev-policy-apex-pdp-cert-initializer-deployer-creds'> Optional: false
DEPLOY_PASSWORD: <set to the key 'password' in secret 'dev-policy-apex-pdp-cert-initializer-deployer-creds'> Optional: false
cadi_longitude: 0.0
cadi_latitude: 0.0
aaf_locator_public_fqdn: policy.onap.org
Mounts:
/opt/app/aaf_config/bin/aaf-add-config.sh from aaf-add-config (rw,path="aaf-add-config.sh")
/opt/app/aaf_config/bin/retrieval_check.sh from aaf-add-config (rw,path="retrieval_check.sh")
/opt/app/aaf_config/cert/truststoreONAP.p12.b64 from aaf-agent-certs (rw,path="truststoreONAP.p12.b64")
/opt/app/aaf_config/cert/truststoreONAPall.jks.b64 from aaf-agent-certs (rw,path="truststoreONAPall.jks.b64")
/opt/app/osaaf from dev-policy-apex-pdp-aaf-config (rw)
/var/run/secrets/kubernetes.io/serviceaccount from dev-policy-apex-pdp-read-token-x829k (ro)
Containers:
policy-apex-pdp:
Container ID:
Image: nexus3.onap.org:10001/onap/policy-apex-pdp:2.6-SNAPSHOT-latest
Image ID:
Port: 6969/TCP
Host Port: 0/TCP
Command:
sh
-c
Args:
if [ -f /opt/app/osaaf/local/.ci ]; then . /opt/app/osaaf/local/.ci; fi;/opt/app/policy/apex-pdp/bin/apexOnapPf.sh -c /home/apexuser/config/OnapPfConfig.json
State: Waiting
Reason: PodInitializing
Ready: False
Restart Count: 0
Liveness: tcp-socket :6969 delay=20s timeout=1s period=10s #success=1 #failure=3
Readiness: tcp-socket :6969 delay=20s timeout=1s period=10s #success=1 #failure=3
Environment:
REPLICAS: 1
Mounts:
/etc/localtime from localtime (ro)
/home/apexuser/config from apexconfig (rw)
/opt/app/osaaf from dev-policy-apex-pdp-aaf-config (rw)
/var/log/onap from policy-logs (rw)
/var/run/secrets/kubernetes.io/serviceaccount from dev-policy-apex-pdp-read-token-x829k (ro)
Conditions:
Type Status
Initialized False
Ready False
ContainersReady False
PodScheduled True
Volumes:
dev-policy-apex-pdp-aaf-config:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium: Memory
SizeLimit: <unset>
aaf-agent-certs:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: dev-cert-wrapper-certs
Optional: false
aaf-add-config:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: dev-policy-apex-pdp-cert-initializer-add-config
Optional: false
localtime:
Type: HostPath (bare host directory volume)
Path: /etc/localtime
HostPathType:
policy-logs:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium:
SizeLimit: <unset>
apexconfig-input:
Type: ConfigMap (a volume populated by a ConfigMap)
Name: dev-policy-apex-pdp-configmap
Optional: false
apexconfig:
Type: EmptyDir (a temporary directory that shares a pod's lifetime)
Medium: Memory
SizeLimit: <unset>
dev-policy-apex-pdp-read-token-x829k:
Type: Secret (a volume populated by a Secret)
SecretName: dev-policy-apex-pdp-read-token-x829k
Optional: false
QoS Class: Burstable
Node-Selectors: <none>
Tolerations: node.kubernetes.io/not-ready:NoExecute op=Exists for 300s
node.kubernetes.io/unreachable:NoExecute op=Exists for 300s
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 35m default-scheduler Successfully assigned onap/dev-policy-apex-pdp-0 to esy-master-policy-002-k8s-02
Warning FailedMount 35m (x2 over 35m) kubelet MountVolume.SetUp failed for volume "apexconfig-input" : failed to sync configmap cache: timed out waiting for the condition
Warning FailedMount 35m (x3 over 35m) kubelet MountVolume.SetUp failed for volume "aaf-add-config" : failed to sync configmap cache: timed out waiting for the condition
Warning FailedMount 35m (x3 over 35m) kubelet MountVolume.SetUp failed for volume "dev-policy-apex-pdp-read-token-x829k" : failed to sync secret cache: timed out waiting for the condition
Normal Pulled 35m kubelet Container image "docker.io/dibi/envsubst:1" already present on machine
Normal Created 35m kubelet Created container policy-apex-pdp-update-config
Normal Started 35m kubelet Started container policy-apex-pdp-update-config
Normal Started 33m (x3 over 35m) kubelet Started container policy-apex-pdp-cert-initializer-readiness
Normal Pulled 32m (x4 over 35m) kubelet Container image "nexus3.onap.org:10001/onap/oom/readiness:3.0.1" already present on machine
Normal Created 32m (x4 over 35m) kubelet Created container policy-apex-pdp-cert-initializer-readiness
Warning BackOff 37s (x123 over 33m) kubelet Back-off restarting failed container
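
The apex-pdp output shows the likely root cause: the policy-apex-pdp-cert-initializer-readiness init container runs /app/ready.py waiting for the aaf-locate, aaf-cm and aaf-service containers, and its last attempt was OOMKilled (exit code 137) under the 100Mi memory limit, so the pod keeps backing off and never finishes initializing. A triage/workaround sketch, assuming the AAF pods run in the same onap namespace; the init-container index 1 matches the order the init containers are listed above, and 256Mi is only an example value:

# log of the OOMKilled readiness attempt
kubectl logs dev-policy-apex-pdp-0 -n onap -c policy-apex-pdp-cert-initializer-readiness --previous
# check whether aaf-locate / aaf-cm / aaf-service are actually up
kubectl get pods -n onap | grep aaf
# raise the memory limit of the second init container (index 1 = cert-initializer-readiness)
kubectl patch statefulset dev-policy-apex-pdp -n onap --type=json \
  -p='[{"op":"replace","path":"/spec/template/spec/initContainers/1/resources/limits/memory","value":"256Mi"}]'

Patching the StatefulSet directly is only a temporary workaround; a Helm upgrade will overwrite it, so a more durable fix would be to raise the readiness-check limit in the chart itself (the exact values key is chart-specific).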