...

Use GitHub OAuth to authenticate your cluster immediately after installing it.

Last tested 20190305 (previously 20180905) using 3.0.1-ONAP

See ONAP Development#Change max-pods from default 110 pod limit

Code Block (bash)
# 0 - verify the security group has all protocols (TCP/UDP) open for 0.0.0.0/0 and ::/0
# to be safe, edit /etc/hosts to make sure DNS resolution is set up for the host
ubuntu@ld:~$ sudo cat /etc/hosts
127.0.0.1 cd.onap.info
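
# a minimal resolution check (a sketch - substitute your own domain for cd.onap.info)
getent hosts cd.onap.info || echo "add an entry to /etc/hosts before continuing"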


# 1 - configure combined master/host VM - 26 min
sudo git clone https://gerrit.onap.org/r/logging-analytics
sudo cp logging-analytics/deploy/rancher/oom_rancher_setup.sh .
sudo ./oom_rancher_setup.sh -b master -s <your domain/ip> -e onap
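
# optional sanity check (a sketch - assumes the rancher server UI listens on port 8880, as used in step 3 below)
sudo docker ps --format '{{.Names}} {{.Image}}' | grep -i rancher
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:8880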


# to deploy more than 110 pods per vm:
# before the environment (1a7) is created from the kubernetes template (1pt2) - at the 3 min wait mark -
# edit the template as described in
# https://wiki.onap.org/display/DW/ONAP+Development#ONAPDevelopment-Changemax-podsfromdefault110podlimit
# and add the following in "additional kubelet flags":
#   --max-pods=500
# (values up to --max-pods=900 are discussed in
# https://lists.onap.org/g/onap-discuss/topic/oom_110_kubernetes_pod/25213556?p=,,,20,0,0,0::recentpostdate%2Fsticky,,,20,2,0,25213556)
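
# once the node registers, confirm the pod limit took effect (sketch):
kubectl get nodes -o jsonpath='{.items[*].status.capacity.pods}'
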
# on a 244G R4.8xlarge vm - 26 min later k8s cluster is up
NAMESPACE     NAME                                    READY     STATUS    RESTARTS   AGE
kube-system   heapster-6cfb49f776-5pq45               1/1       Running   0          10m
kube-system   kube-dns-75c8cb4ccb-7dlsh               3/3       Running   0          10m
kube-system   kubernetes-dashboard-6f4c8b9cd5-v625c   1/1       Running   0          10m
kube-system   monitoring-grafana-76f5b489d5-zhrjc     1/1       Running   0          10m
kube-system   monitoring-influxdb-6fc88bd58d-9494h    1/1       Running   0          10m
kube-system   tiller-deploy-8b6c5d4fb-52zmt           1/1       Running   0          2m
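
# before any helm operations, it can help to wait for tiller to finish rolling out (sketch):
kubectl rollout status deployment/tiller-deploy -n kube-system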

# 3 - secure the master via GitHub OAuth immediately - to lock out crypto miners
http://cd.onap.info:8880

# check the master cluster
ubuntu@ip-172-31-14-89:~$ kubectl top nodes
NAME                                         CPU(cores)   CPU%      MEMORY(bytes)   MEMORY%   
ip-172-31-8-245.us-east-2.compute.internal   179m         2%        2494Mi          4%        
ubuntu@ip-172-31-14-89:~$ kubectl get nodes -o wide
NAME                                         STATUS    ROLES     AGE       VERSION            EXTERNAL-IP   OS-IMAGE             KERNEL-VERSION   CONTAINER-RUNTIME
ip-172-31-8-245.us-east-2.compute.internal   Ready     <none>    13d       v1.10.3-rancher1   172.17.0.1    Ubuntu 16.04.1 LTS   4.4.0-1049-aws   docker://17.3.2

# 7 - after the cluster is up, run the cd.sh script to bring ONAP up - customize your values.yaml first; the 2nd time you run the script (a clean install) it will clone a new oom repo
# get the dev.yaml and set any pods you want up to true as well as fill out the openstack parameters
sudo wget https://git.onap.org/oom/plain/kubernetes/onap/resources/environments/dev.yaml
sudo cp dev.yaml dev0.yaml
sudo vi dev0.yaml 
sudo cp dev0.yaml dev1.yaml
sudo cp logging-analytics/deploy/cd.sh .
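
# optional: quickly review which components are enabled in the override (sketch - flags follow the oom dev.yaml layout):
grep -B1 'enabled:' dev0.yaml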

# this does a prepull (-p true), clones the 3.0.0-ONAP branch (-b), and runs a managed install (-f true)
sudo ./cd.sh -b 3.0.0-ONAP -e onap -p true -n nexus3.onap.org:10001 -f true -s 300 -c true -d true -w false -r false
# check around 55 min (on a 256G single node with 32 vCores)
# format: pods/failed/up @ elapsed minutes and RAM used
161/13/153 @ 50 min, 107G
@ 55 min
ubuntu@ip-172-31-20-218:~$ kubectl get pods --all-namespaces | grep onap | grep -E '1/1|2/2' | wc -l
152
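
# a simple loop to track progress with the same filter (sketch):
watch -n 60 "kubectl get pods --all-namespaces | grep onap | grep -E '1/1|2/2' | wc -l"
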
ubuntu@ip-172-31-20-218:~$ kubectl get pods --all-namespaces | grep -E '0/|1/2' 
onap          dep-deployment-handler-5789b89d4b-s6fzw                 1/2       Running                 0          8m
onap          dep-service-change-handler-76dcd99f84-fchxd             0/1       ContainerCreating       0          3m
onap          onap-aai-champ-68ff644d85-rv7tr                         0/1       Running                 0          53m
onap          onap-aai-gizmo-856f86d664-q5pvg                         1/2       CrashLoopBackOff        9          53m
onap          onap-oof-85864d6586-zcsz5                               0/1       ImagePullBackOff        0          53m
onap          onap-pomba-kibana-d76b6dd4c-sfbl6                       0/1       Init:CrashLoopBackOff   7          53m
onap          onap-pomba-networkdiscovery-85d76975b7-mfk92            1/2       CrashLoopBackOff        9          53m
onap          onap-pomba-networkdiscoveryctxbuilder-c89786dfc-qnlx9   1/2       CrashLoopBackOff        9          53m
onap          onap-vid-84c88db589-8cpgr                               1/2       CrashLoopBackOff        7          52m
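
# typical triage for the failing pods above (a sketch - pick any pod name from the listing):
kubectl describe pod onap-oof-85864d6586-zcsz5 -n onap       # ImagePullBackOff - the events section names the failing image
kubectl logs onap-aai-champ-68ff644d85-rv7tr -n onap         # 0/1 Running - check why readiness never passes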

Note: DCAE has 2 additional sets of orchestration after the initial k8s orchestration - the second appears around 57 min
ubuntu@ip-172-31-20-218:~$ kubectl get pods --all-namespaces | grep -E '0/|1/2' 
onap          dep-dcae-prh-6b5c6ff445-pr547                           0/2       ContainerCreating       0          2m
onap          dep-dcae-tca-analytics-7dbd46d5b5-bgrn9                 0/2       ContainerCreating       0          1m
onap          dep-dcae-ves-collector-59d4ff58f7-94rpq                 0/2       ContainerCreating       0          1m
onap          onap-aai-champ-68ff644d85-rv7tr                         0/1       Running                 0          57m
onap          onap-aai-gizmo-856f86d664-q5pvg                         1/2       CrashLoopBackOff        10         57m
onap          onap-oof-85864d6586-zcsz5                               0/1       ImagePullBackOff        0          57m
onap          onap-pomba-kibana-d76b6dd4c-sfbl6                       0/1       Init:CrashLoopBackOff   8          57m
onap          onap-pomba-networkdiscovery-85d76975b7-mfk92            1/2       CrashLoopBackOff        11         57m
onap          onap-pomba-networkdiscoveryctxbuilder-c89786dfc-qnlx9   1/2       Error                   10         57m
onap          onap-vid-84c88db589-8cpgr                               1/2       CrashLoopBackOff        9          57m

at 1 hour
ubuntu@ip-172-31-20-218:~$ free
              total        used        free      shared  buff/cache   available
Mem:      251754696   111586672    45000724      193628    95167300   137158588
ubuntu@ip-172-31-20-218:~$ kubectl get pods --all-namespaces | grep onap | wc -l
164
ubuntu@ip-172-31-20-218:~$ kubectl get pods --all-namespaces | grep onap | grep -E '1/1|2/2' | wc -l
155
ubuntu@ip-172-31-20-218:~$ kubectl get pods --all-namespaces | grep -E '0/|1/2' | wc -l
8
ubuntu@ip-172-31-20-218:~$ kubectl get pods --all-namespaces | grep -E '0/|1/2' 
onap          dep-dcae-ves-collector-59d4ff58f7-94rpq                 1/2       Running                 0          4m
onap          onap-aai-champ-68ff644d85-rv7tr                         0/1       Running                 0          59m
onap          onap-aai-gizmo-856f86d664-q5pvg                         1/2       CrashLoopBackOff        10         59m
onap          onap-oof-85864d6586-zcsz5                               0/1       ImagePullBackOff        0          59m
onap          onap-pomba-kibana-d76b6dd4c-sfbl6                       0/1       Init:CrashLoopBackOff   8          59m
onap          onap-pomba-networkdiscovery-85d76975b7-mfk92            1/2       CrashLoopBackOff        11         59m
onap          onap-pomba-networkdiscoveryctxbuilder-c89786dfc-qnlx9   1/2       CrashLoopBackOff        10         59m
onap          onap-vid-84c88db589-8cpgr                               1/2       CrashLoopBackOff        9          59m


ubuntu@ip-172-31-20-218:~$ df
Filesystem     1K-blocks     Used Available Use% Mounted on
udev           125869392        0 125869392   0% /dev
tmpfs           25175472    54680  25120792   1% /run
/dev/xvda1     121914320 91698036  30199900  76% /
tmpfs          125877348    30312 125847036   1% /dev/shm
tmpfs               5120        0      5120   0% /run/lock
tmpfs          125877348        0 125877348   0% /sys/fs/cgroup
tmpfs           25175472        0  25175472   0% /run/user/1000
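
# the bulk of the 76% used on / is likely the prepulled docker images (an assumption); to confirm:
sudo du -sh /var/lib/docker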

todo: verify the release is present after a helm install - the configMap size issue is breaking the release for now
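
# a sketch of the verification in the todo above (assumes the release name "onap" from -e onap,
# and that tiller stores release data as configMaps in kube-system, as helm 2 does):
helm ls
kubectl get configmap -n kube-system | grep onap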

...