diff --git a/compcheck/comp_check.go b/compcheck/comp_check.go index c6f7ea9d2..6800a3f50 100644 --- a/compcheck/comp_check.go +++ b/compcheck/comp_check.go @@ -930,7 +930,7 @@ func ValidatePatternClusterNamespace(isNamespaceScoped bool, nodeNamespace strin } else { // is cluster scope if patternNamespace != "" { - return NewCompCheckError(fmt.Errorf(msgPrinter.Sprintf("The Cluster namespace specified in the pattern '%v' is %v, only pattern with empty clsuter namespace can be registered for cluster scoped agent", patternId, patternNamespace)), COMPCHECK_VALIDATION_ERROR) + return NewCompCheckError(fmt.Errorf(msgPrinter.Sprintf("The Cluster namespace specified in the pattern '%v' is %v, only pattern with empty cluster namespace can be registered for cluster scoped agent", patternId, patternNamespace)), COMPCHECK_VALIDATION_ERROR) } } return nil diff --git a/producer/producer_protocol_handler.go b/producer/producer_protocol_handler.go index c093bdfd7..eac91ffec 100644 --- a/producer/producer_protocol_handler.go +++ b/producer/producer_protocol_handler.go @@ -173,7 +173,9 @@ func (w *BaseProducerProtocolHandler) sendMessage(mt interface{}, pay []byte) er continue } } else { - glog.V(5).Infof(BPPHlogString(w.Name(), fmt.Sprintf("Sent message for %v to exchange.", messageTarget.ReceiverExchangeId))) + if glog.V(5) { + glog.Infof(BPPHlogString(w.Name(), fmt.Sprintf("Sent message for %v to exchange.", messageTarget.ReceiverExchangeId))) + } return nil } } @@ -469,7 +471,9 @@ func (w *BaseProducerProtocolHandler) MatchNodeType(tcPolicy *policy.Policy, dev } } - glog.V(5).Infof(BPPHlogString(w.Name(), fmt.Sprintf("workload has the correct deployment for the node type '%v'", nodeType))) + if glog.V(5) { + glog.Infof(BPPHlogString(w.Name(), fmt.Sprintf("workload has the correct deployment for the node type '%v'", nodeType))) + } return true, nil } } @@ -487,7 +491,9 @@ func (w *BaseProducerProtocolHandler) MatchClusterNamespace(tcPolicy *policy.Pol } else { compResult, _, reason := compcheck.CheckClusterNamespaceCompatibility(nodeType, cutil.GetClusterNamespace(), cutil.IsNamespaceScoped(), tcPolicy.ClusterNamespace, workload.ClusterDeployment, "", true, nil) if compResult { - glog.V(5).Infof(BPPHlogString(w.Name(), fmt.Sprintf("cluster namespace matches. %v", reason))) + if glog.V(5) { + glog.Infof(BPPHlogString(w.Name(), fmt.Sprintf("cluster namespace matches."))) + } } else { glog.Errorf(BPPHlogString(w.Name(), fmt.Sprintf("cluster namespace not match. 
%v", reason))) } @@ -512,7 +518,9 @@ func (w *BaseProducerProtocolHandler) MatchPattern(tcPolicy *policy.Policy, dev glog.Errorf(BPPHlogString(w.Name(), fmt.Sprintf("pattern from the proposal: '%v' does not match the pattern on the device: '%v'.", tcPolicy.PatternId, device_pattern))) return false, nil } else { - glog.V(5).Infof(BPPHlogString(w.Name(), fmt.Sprintf("pattern from the proposal: '%v' matches the pattern on the device: '%v'.", tcPolicy.PatternId, device_pattern))) + if glog.V(5) { + glog.Infof(BPPHlogString(w.Name(), fmt.Sprintf("pattern from the proposal: '%v' matches the pattern on the device: '%v'.", tcPolicy.PatternId, device_pattern))) + } return true, nil } } @@ -588,12 +596,16 @@ func (w *BaseProducerProtocolHandler) TerminateAgreement(ag *persistence.Establi func (w *BaseProducerProtocolHandler) GetAgbotMessageEndpoint(agbotId string) (string, []byte, error) { - glog.V(5).Infof(BPPHlogString(w.Name(), fmt.Sprintf("retrieving agbot %v msg endpoint from exchange", agbotId))) + if glog.V(5) { + glog.Infof(BPPHlogString(w.Name(), fmt.Sprintf("retrieving agbot %v msg endpoint from exchange", agbotId))) + } if ag, err := w.getAgbot(agbotId, w.ec.GetExchangeURL(), w.ec.GetExchangeId(), w.ec.GetExchangeToken()); err != nil { return "", nil, err } else { - glog.V(5).Infof(BPPHlogString(w.Name(), fmt.Sprintf("retrieved agbot %v msg endpoint from exchange %v", agbotId, ag.MsgEndPoint))) + if glog.V(5) { + glog.Infof(BPPHlogString(w.Name(), fmt.Sprintf("retrieved agbot %v msg endpoint from exchange %v", agbotId, ag.MsgEndPoint))) + } return ag.MsgEndPoint, ag.PublicKey, nil } @@ -601,7 +613,9 @@ func (w *BaseProducerProtocolHandler) GetAgbotMessageEndpoint(agbotId string) (s func (w *BaseProducerProtocolHandler) getAgbot(agbotId string, url string, deviceId string, token string) (*exchange.Agbot, error) { - glog.V(5).Infof(BPPHlogString(w.Name(), fmt.Sprintf("retrieving agbot %v from exchange", agbotId))) + if glog.V(5) { + glog.Infof(BPPHlogString(w.Name(), fmt.Sprintf("retrieving agbot %v from exchange", agbotId))) + } var resp interface{} resp = new(exchange.GetAgbotsResponse) @@ -632,7 +646,9 @@ func (w *BaseProducerProtocolHandler) getAgbot(agbotId string, url string, devic if ag, there := ags[agbotId]; !there { return nil, errors.New(fmt.Sprintf("agbot %v not in GET response %v as expected", agbotId, ags)) } else { - glog.V(5).Infof(BPPHlogString(w.Name(), fmt.Sprintf("retrieved agbot %v from exchange %v", agbotId, ag))) + if glog.V(5) { + glog.Infof(BPPHlogString(w.Name(), fmt.Sprintf("retrieved agbot %v from exchange %v", agbotId, ag))) + } return &ag, nil } } diff --git a/test/docker/fs/etc/agent-in-kube/horizon.env b/test/docker/fs/etc/agent-in-kube/horizon.env index cd23be19d..92fe969c7 100644 --- a/test/docker/fs/etc/agent-in-kube/horizon.env +++ b/test/docker/fs/etc/agent-in-kube/horizon.env @@ -1,6 +1,6 @@ HZN_EXCHANGE_URL=http://${EX_IP}:8080/v1 -HZN_FSS_CSSURL=https://${CSS_IP}:9443 -HZN_AGBOT_URL=https://${AGBOT_IP}:3111 +HZN_FSS_CSSURL=http://${CSS_IP}:8080 +HZN_AGBOT_URL=http://${AGBOT_IP}:8080 HZN_DEVICE_ID=agent-in-kube HZN_NODE_ID=agent-in-kube HZN_MGMT_HUB_CERT_PATH=/etc/default/cert/hub.crt diff --git a/test/gov/deployment_policies/userdev/bp_k8s_embedded_ns_update.json b/test/gov/deployment_policies/userdev/bp_k8s_embedded_ns_update.json new file mode 100644 index 000000000..db10b1844 --- /dev/null +++ b/test/gov/deployment_policies/userdev/bp_k8s_embedded_ns_update.json @@ -0,0 +1,14 @@ +{ + "service": { + "name": "k8s-service-embedded-ns", + "org": 
"e2edev@somecomp.com", + "arch": "amd64", + "clusterNamespace": "ns-in-policy", + "serviceVersions": [ + { + "version": "1.0.0", + "priority":{} + } + ] + } + } \ No newline at end of file diff --git a/test/gov/deployment_policies/userdev/bp_k8s_update.json b/test/gov/deployment_policies/userdev/bp_k8s_update.json new file mode 100644 index 000000000..2e70f1640 --- /dev/null +++ b/test/gov/deployment_policies/userdev/bp_k8s_update.json @@ -0,0 +1,14 @@ +{ + "service": { + "name": "k8s-service1", + "org": "e2edev@somecomp.com", + "arch": "amd64", + "clusterNamespace": "ns-in-policy", + "serviceVersions": [ + { + "version": "1.0.0", + "priority":{} + } + ] + } + } \ No newline at end of file diff --git a/test/gov/gov-combined.sh b/test/gov/gov-combined.sh index b264d1078..4a2555460 100755 --- a/test/gov/gov-combined.sh +++ b/test/gov/gov-combined.sh @@ -508,19 +508,6 @@ if [ "$HA" == "1" ]; then fi fi -#Start the edge cluster verification test. -if [ "$NOKUBE" != "1" ] && [ "$TESTFAIL" != "1" ] && [ "${TEST_PATTERNS}" == "" ] -then - echo -e "Verifying edge cluster agreement" - ./verify_edge_cluster.sh - if [ $? -ne 0 ]; then - echo "Failed edge cluster verification tests." - exit 1 - fi -else - echo -e "Edge cluster agreement verification skipped." -fi - # Clean up remote environment if [ ${REMOTE_HUB} -eq 1 ]; then echo "Clean up remote environment" diff --git a/test/gov/hzn_compcheck.sh b/test/gov/hzn_compcheck.sh index cc3ff3ad2..acc580cfb 100755 --- a/test/gov/hzn_compcheck.sh +++ b/test/gov/hzn_compcheck.sh @@ -2,6 +2,7 @@ USERDEV_ADMIN_AUTH="userdev/userdevadmin:userdevadminpw" export HZN_EXCHANGE_URL="${EXCH_APP_HOST}" +USERDEV_ORG="userdev" unset HZN_ORG_ID unset HZN_EXCHANGE_NODE_AUTH @@ -23,7 +24,11 @@ else service_location="/root/input_files/compcheck/service_location.json" bp_location="/root/input_files/compcheck/business_pol_location.json" pattern_sloc="/root/input_files/compcheck/pattern_sloc.json" -fi +fi + +if [[ "$NOKUBE" != "1" ]]; then + bp_k8s="/root/input_files/compcheck/business_pol_k8s_service1" +fi # check the the result to see if it matches the expected http code and error @@ -212,6 +217,15 @@ if [ $? -eq 0 ]; then echo "Service bluehorizon.network-services-location_2.0.7_${ARCH} should be compatible but not." exit 2 fi + +if [[ "$NOKUBE" != "1" ]]; then + echo -e "\n${PREFIX} test with conflict namespace" + CMD="hzn deploycheck policy -u $USERDEV_ADMIN_AUTH -B input_files/compcheck/business_pol_k8s_service1.json -n $USERDEV_ORG/agent-in-kube -s agent-namespace" + echo "$CMD" + RES=$($CMD 2>&1) + check_comp_results "$RES" "false" "Node properties do not satisfy constraint requirements." +fi + echo "Compatibility result expected." 
PREFIX="HZN userinput compatibility test:" diff --git a/test/gov/input_files/compcheck/business_pol_k8s_service1.json b/test/gov/input_files/compcheck/business_pol_k8s_service1.json new file mode 100644 index 000000000..1e0edabb9 --- /dev/null +++ b/test/gov/input_files/compcheck/business_pol_k8s_service1.json @@ -0,0 +1,34 @@ +{ + "label": "business policy for k8s-service1", + "description": "deploycheck test for k8s-service1", + "service": { + "name": "k8s-service1", + "org": "e2edev@somecomp.com", + "arch": "__ARCH__", + "clusterNamespace": "ns-in-policy", + "serviceVersions": [ + { + "version": "1.0.0", + "priority": {}, + "upgradePolicy": {} + } + ], + "nodeHealth": { + "missing_heartbeat_interval": 1800, + "check_agreement_status": 1800 + } + }, + "properties": [ + { + "name": "iame2edev", + "value": "true" + }, + { + "name": "NOK8S", + "value": false + } + ], + "constraints": [ + "purpose == network-testing && openhorizon.kubernetesNamespace == another-namespace" + ] +} diff --git a/test/gov/input_files/k8s_deploy/topservice-operator-with-embedded-ns/node.policy.json b/test/gov/input_files/k8s_deploy/topservice-operator-with-embedded-ns/node.policy.json new file mode 100644 index 000000000..3408f055e --- /dev/null +++ b/test/gov/input_files/k8s_deploy/topservice-operator-with-embedded-ns/node.policy.json @@ -0,0 +1 @@ +{"properties":[{"name":"purpose","value":"network-testing"}],"constraints":["service.embedded.ns == operator-embedded-ns"]} diff --git a/test/gov/input_files/k8s_deploy/topservice-operator-with-embedded-ns/node_ui.json b/test/gov/input_files/k8s_deploy/topservice-operator-with-embedded-ns/node_ui.json new file mode 100644 index 000000000..a6a91c0a4 --- /dev/null +++ b/test/gov/input_files/k8s_deploy/topservice-operator-with-embedded-ns/node_ui.json @@ -0,0 +1,23 @@ +[ + { + "serviceOrgid": "e2edev@somecomp.com", + "serviceUrl": "k8s-service-embedded-ns", + "serviceArch": "${ARCH}", + "serviceVersionRange": "[1.0.0,INFINITY)", + "inputs": [ + { + "name": "var1", + "value": "k8s String" + }, + { + "name": "var2", + "value": 7 + }, + { + "name": "var3", + "value": 88.8 + } + ] + } +] + diff --git a/test/gov/input_files/k8s_deploy/topservice-operator-with-embedded-ns/topservice-operator-with-embedded-ns.tar.gz b/test/gov/input_files/k8s_deploy/topservice-operator-with-embedded-ns/topservice-operator-with-embedded-ns.tar.gz new file mode 100644 index 000000000..3040c4f40 Binary files /dev/null and b/test/gov/input_files/k8s_deploy/topservice-operator-with-embedded-ns/topservice-operator-with-embedded-ns.tar.gz differ diff --git a/test/gov/input_files/k8s_deploy/topservice-operator/node.policy.json b/test/gov/input_files/k8s_deploy/topservice-operator/node.policy.json new file mode 100644 index 000000000..875d0133b --- /dev/null +++ b/test/gov/input_files/k8s_deploy/topservice-operator/node.policy.json @@ -0,0 +1 @@ +{"properties":[{"name":"purpose","value":"network-testing"}],"constraints":["has.service.embedded.ns == false"]} diff --git a/test/gov/input_files/k8s_deploy/topservice-operator/node_ui.json b/test/gov/input_files/k8s_deploy/topservice-operator/node_ui.json new file mode 100644 index 000000000..e8607ea7c --- /dev/null +++ b/test/gov/input_files/k8s_deploy/topservice-operator/node_ui.json @@ -0,0 +1,23 @@ +[ + { + "serviceOrgid": "e2edev@somecomp.com", + "serviceUrl": "k8s-service1", + "serviceArch": "${ARCH}", + "serviceVersionRange": "[1.0.0,INFINITY)", + "inputs": [ + { + "name": "var1", + "value": "k8s String" + }, + { + "name": "var2", + "value": 7 + }, + { 
+ "name": "var3", + "value": 88.8 + } + ] + } +] + diff --git a/test/gov/input_files/k8s_deploy/topservice-operator/topservice-operator.tar.gz b/test/gov/input_files/k8s_deploy/topservice-operator/topservice-operator.tar.gz new file mode 100644 index 000000000..3797abe9d Binary files /dev/null and b/test/gov/input_files/k8s_deploy/topservice-operator/topservice-operator.tar.gz differ diff --git a/test/gov/run_kube.sh b/test/gov/run_kube.sh index 415461022..90a55ba3e 100755 --- a/test/gov/run_kube.sh +++ b/test/gov/run_kube.sh @@ -7,17 +7,23 @@ fi # set -x +PREFIX="Cluster scoped agent test:" E2EDEVTEST_TEMPFS=$1 ANAX_SOURCE=$2 EXCH_ROOTPW=$3 DOCKER_TEST_NETWORK=$4 -NAME_SPACE="agent-namespace" +AGENT_NAME_SPACE="agent-namespace" +NAMESPACE_IN_POLICY="ns-in-policy" +SVC_EMBEDDED_NAMESPACE="operator-embedded-ns" +OPERATOR_DEPLOYMENT_NAME="topservice-operator" CONFIGMAP_NAME="agent-configmap-horizon" SECRET_NAME="agent-secret-cert" PVC_NAME="agent-pvc-horizon" WAIT_POD_MAX_TRY=30 +USERDEV_ADMIN_AUTH="userdev/userdevadmin:userdevadminpw" + isRoot=$(id -u) cprefix="sudo -E" @@ -29,7 +35,7 @@ fi # # Start the microk8s kube environment. If microk8s isnt installed, then install it. # -echo "Starting Kube test environment" +echo "Starting Kube test environment with $cprefix microk8s.start" $cprefix microk8s.start RC=$? sleep 2 @@ -171,7 +177,7 @@ fi # Now start deploying the agent, running in it's own namespace. # echo "Create namespace for the agent" -$cprefix microk8s.kubectl create namespace ${NAME_SPACE} +$cprefix microk8s.kubectl create namespace ${AGENT_NAME_SPACE} RC=$? if [ $RC -ne 0 ] then @@ -182,24 +188,24 @@ fi # Create a configmap based on ${E2EDEVTEST_TEMPFS}/etc/agent-in-kube/horizon echo "Create configmap to mount horizon env file" -$cprefix microk8s.kubectl create configmap ${CONFIGMAP_NAME} --from-file=${E2EDEVTEST_TEMPFS}/etc/agent-in-kube/horizon -n ${NAME_SPACE} +$cprefix microk8s.kubectl create configmap ${CONFIGMAP_NAME} --from-file=${E2EDEVTEST_TEMPFS}/etc/agent-in-kube/horizon -n ${AGENT_NAME_SPACE} RC=$? if [ $RC -ne 0 ] then echo "Failure creating configmap '${CONFIGMAP_NAME}' to mount horizon env file: $RC" - $cprefix microk8s.kubectl get configmap ${CONFIGMAP_NAME} -n ${NAME_SPACE} + $cprefix microk8s.kubectl get configmap ${CONFIGMAP_NAME} -n ${AGENT_NAME_SPACE} exit 1 fi # Create a secret based on ${E2EDEVTEST_TEMPFS}/etc/agent-in-kube/hub.crt if [ ${CERT_LOC} -eq "1" ]; then echo "Create secret to mount cert file" - $cprefix microk8s.kubectl create secret generic ${SECRET_NAME} --from-file=${E2EDEVTEST_TEMPFS}/etc/agent-in-kube/hub.crt -n ${NAME_SPACE} + $cprefix microk8s.kubectl create secret generic ${SECRET_NAME} --from-file=${E2EDEVTEST_TEMPFS}/etc/agent-in-kube/hub.crt -n ${AGENT_NAME_SPACE} RC=$? if [ $RC -ne 0 ] then echo "Failure creating secret '${SECRET_NAME}' to mount cert file: $RC" - $cprefix microk8s.kubectl get secret ${SECRET_NAME} -n ${NAME_SPACE} + $cprefix microk8s.kubectl get secret ${SECRET_NAME} -n ${AGENT_NAME_SPACE} exit 1 fi fi @@ -211,15 +217,15 @@ RC=$? 
 if [ $RC -ne 0 ]
 then
 	echo "Failure creating pvc '${PVC_NAME}' to mount db file: $RC"
-	$cprefix microk8s.kubectl get pvc ${PVC_NAME} -n ${NAME_SPACE}
+	$cprefix microk8s.kubectl get pvc ${PVC_NAME} -n ${AGENT_NAME_SPACE}
 	exit 1
 fi
 
 sleep 2
 
 echo "Deploy the agent"
-# Debug help = microk8s.kubectl describe pod -n ${NAME_SPACE}
-# Debug help = microk8s.kubectl exec -it -n ${NAME_SPACE} /bin/bash
+# Debug help = microk8s.kubectl describe pod -n ${AGENT_NAME_SPACE}
+# Debug help = microk8s.kubectl exec -it -n ${AGENT_NAME_SPACE} /bin/bash
 $cprefix microk8s.kubectl apply -f ${E2EDEVTEST_TEMPFS}/etc/agent-in-kube/deployment.yaml
 RC=$?
 if [ $RC -ne 0 ]
@@ -242,7 +248,7 @@ do
     echo "Timeout for waiting pod to become READY"
     exit 1
   else
-    if [[ $($cprefix microk8s.kubectl get pods -n ${NAME_SPACE} -l app=agent -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; then
+    if [[ $($cprefix microk8s.kubectl get pods -n ${AGENT_NAME_SPACE} -l app=agent -o 'jsonpath={..status.conditions[?(@.type=="Ready")].status}') != "True" ]]; then
       echo "waiting for pod: $i"
       ((i++))
       sleep 1
@@ -254,17 +260,226 @@ done
 
 echo "Configuring agent for policy"
-POD=$($cprefix microk8s.kubectl get pod -l app=agent -n ${NAME_SPACE} -o jsonpath="{.items[0].metadata.name}")
+POD=$($cprefix microk8s.kubectl get pod -l app=agent -n ${AGENT_NAME_SPACE} -o jsonpath="{.items[0].metadata.name}")
 if [ $POD == "" ]
 then
     echo "Unable to find agent POD"
     exit 1
 fi
-$cprefix microk8s.kubectl cp $PWD/gov/input_files/k8s_deploy/node.policy.json ${NAME_SPACE}/${POD}:/home/agentuser/.
-$cprefix microk8s.kubectl cp $PWD/gov/input_files/k8s_deploy/node_ui.json ${NAME_SPACE}/${POD}:/home/agentuser/.
+$cprefix microk8s.kubectl cp $PWD/gov/deployment_policies/userdev/bp_k8s_update.json ${AGENT_NAME_SPACE}/${POD}:/home/agentuser/.
+$cprefix microk8s.kubectl cp $PWD/gov/deployment_policies/userdev/bp_k8s_embedded_ns_update.json ${AGENT_NAME_SPACE}/${POD}:/home/agentuser/.
+
+$cprefix microk8s.kubectl cp $PWD/gov/input_files/k8s_deploy/topservice-operator/node.policy.json ${AGENT_NAME_SPACE}/${POD}:/home/agentuser/node.policy.k8s.svc1.json
+$cprefix microk8s.kubectl cp $PWD/gov/input_files/k8s_deploy/topservice-operator-with-embedded-ns/node.policy.json ${AGENT_NAME_SPACE}/${POD}:/home/agentuser/node.policy.k8s.embedded.svc.json
+
+$cprefix microk8s.kubectl cp $PWD/gov/input_files/k8s_deploy/topservice-operator/node_ui.json ${AGENT_NAME_SPACE}/${POD}:/home/agentuser/node_ui_k8s_svc1.json
+$cprefix microk8s.kubectl cp $PWD/gov/input_files/k8s_deploy/topservice-operator-with-embedded-ns/node_ui.json ${AGENT_NAME_SPACE}/${POD}:/home/agentuser/node_ui_k8s_embedded_svc.json
+
+
+# cluster agent pattern test
+# - Failed case:
+#     1. pattern with a cluster namespace -> unable to register (sk8s-with-cluster-ns)
+# - Successful cases:
+#     1. pattern with empty cluster namespace, no embedded namespace -> service pod in the agent namespace (e2edev@somecomp.com/sk8s)
+#     2. pattern with empty cluster namespace but an embedded namespace != agent namespace -> service pod in the embedded namespace (sk8s-with-embedded-ns)
+# After the test, the cluster agent stays registered with e2edev@somecomp.com/sk8s and the service pod is deployed in "agent-namespace"
+
+# cluster agent policy test
+# 1. business policy has no "clusterNamespace", policy constraints match the node, service has an embedded ns, the service deploys to "operator-embedded-ns" (bp_k8s_embedded_ns)
+# 2. business policy has "clusterNamespace": "ns-in-policy", policy constraints match the node, service has an embedded ns, the service deploys to "ns-in-policy" (update bp_k8s_embedded_ns)
+# 3. business policy has no "clusterNamespace", policy constraints match the node, the service deploys to "agent-namespace" (bp_k8s)
+# 4. business policy has "clusterNamespace": "ns-in-policy", policy constraints match the node, the service deploys to "ns-in-policy" (update bp_k8s)
+# After the test, the cluster agent stays registered with userdev/bp_k8s and the service pod is deployed in "ns-in-policy"
+# (the namespace precedence these cases exercise is sketched below)
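+
+# Minimal sketch of the namespace precedence the cases above exercise: a clusterNamespace set in
+# the deployment policy wins, otherwise a namespace embedded in the operator YAML wins, otherwise
+# the service lands in the agent's own namespace. This is a hypothetical helper added for
+# illustration only; it is not invoked by the test flow below.
+expectedClusterNamespace() {
+    local policy_ns="$1" embedded_ns="$2" agent_ns="$3"
+    if [ -n "$policy_ns" ]; then
+        echo "$policy_ns"
+    elif [ -n "$embedded_ns" ]; then
+        echo "$embedded_ns"
+    else
+        echo "$agent_ns"
+    fi
+}
+# e.g. expectedClusterNamespace "ns-in-policy" "operator-embedded-ns" "agent-namespace" prints "ns-in-policy"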
+
+AGBOT_URL="$AGBOT_IP:8080"
+source gov/verify_edge_cluster.sh
+kubecmd="$cprefix microk8s.kubectl"
+
+if [ "${TEST_PATTERNS}" != "" ]; then
+    # pattern case
+    # pattern name: e2edev@somecomp.com/sk8s-with-cluster-ns
+    $cprefix microk8s.kubectl exec ${POD} -it -n ${AGENT_NAME_SPACE} -- env ARCH=${ARCH} /usr/bin/hzn register -f /home/agentuser/node_ui_k8s_svc1.json -p e2edev@somecomp.com/sk8s-with-cluster-ns -u root/root:${EXCH_ROOTPW}
+    if [ $? -eq 0 ]; then
+        echo -e "${PREFIX} cluster agent should return an error when registering a pattern that has a non-empty cluster namespace"
+        exit 2
+    else
+        echo -e "${PREFIX} cluster agent got the expected error when registering sk8s-with-cluster-ns, which has a non-empty cluster namespace"
+    fi
+
+    result=$($cprefix microk8s.kubectl exec ${POD} -it -n ${AGENT_NAME_SPACE} -- env ARCH=${ARCH} /usr/bin/hzn node list | jq -r '.configstate.state')
+    if [ "$result" != "unconfigured" ]; then
+        echo -e "${PREFIX} anax-in-kube configstate.state is $result, should be in 'unconfigured' state"
+        exit 2
+    else
+        echo -e "${PREFIX} cluster agent is in the expected 'unconfigured' state"
+    fi
+
+    # pattern name: e2edev@somecomp.com/sk8s-with-embedded-ns
+    $cprefix microk8s.kubectl exec ${POD} -it -n ${AGENT_NAME_SPACE} -- env ARCH=${ARCH} /usr/bin/hzn register -f /home/agentuser/node_ui_k8s_embedded_svc.json -p e2edev@somecomp.com/sk8s-with-embedded-ns -u root/root:${EXCH_ROOTPW}
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to register pattern e2edev@somecomp.com/sk8s-with-embedded-ns"
+        exit 2
+    else
+        echo -e "${PREFIX} cluster agent registered pattern e2edev@somecomp.com/sk8s-with-embedded-ns, verifying agreement..."
+    fi
+
+    # wait 30s for the agreement to come up
+    sleep 30
+    checkAndWaitForActiveAgreementForPattern "e2edev@somecomp.com/sk8s-with-embedded-ns" $AGBOT_URL "$kubecmd" $POD $AGENT_NAME_SPACE
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check agreement for e2edev@somecomp.com/sk8s-with-embedded-ns"
+        exit 2
+    fi
+
+    checkDeploymentInNamespace "$kubecmd" $OPERATOR_DEPLOYMENT_NAME $SVC_EMBEDDED_NAMESPACE
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check deployment for e2edev@somecomp.com/sk8s-with-embedded-ns"
+        exit 2
+    fi
+
+    echo -e "${PREFIX} cluster agent successfully registered with pattern e2edev@somecomp.com/sk8s-with-embedded-ns, unregistering..."
+    $cprefix microk8s.kubectl exec ${POD} -it -n ${AGENT_NAME_SPACE} -- env ARCH=${ARCH} /usr/bin/hzn unregister -f
+
+    # pattern name: e2edev@somecomp.com/sk8s
+    $cprefix microk8s.kubectl exec ${POD} -it -n ${AGENT_NAME_SPACE} -- env ARCH=${ARCH} /usr/bin/hzn register -f /home/agentuser/node_ui_k8s_svc1.json -p e2edev@somecomp.com/sk8s -u root/root:${EXCH_ROOTPW}
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to register pattern e2edev@somecomp.com/sk8s"
+        exit 2
+    else
+        echo -e "${PREFIX} cluster agent registered pattern e2edev@somecomp.com/sk8s, verifying agreement..."
+    fi
+
+    sleep 30
+    checkAndWaitForActiveAgreementForPattern "e2edev@somecomp.com/sk8s" $AGBOT_URL "$kubecmd" $POD $AGENT_NAME_SPACE
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check agreement for e2edev@somecomp.com/sk8s"
+        exit 2
+    fi
+    checkDeploymentInNamespace "$kubecmd" $OPERATOR_DEPLOYMENT_NAME $AGENT_NAME_SPACE
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check deployment for e2edev@somecomp.com/sk8s"
+        exit 2
+    fi
+
+    echo -e "${PREFIX} cluster agent successfully registered with pattern e2edev@somecomp.com/sk8s"
+else
+    # policy case
+    # policy: userdev/bp_k8s_embedded_ns
+    echo -e "${PREFIX} cluster agent registers with deployment policy userdev/bp_k8s_embedded_ns"
+    $cprefix microk8s.kubectl exec ${POD} -it -n ${AGENT_NAME_SPACE} -- env ARCH=${ARCH} /usr/bin/hzn register -f /home/agentuser/node_ui_k8s_embedded_svc.json --policy /home/agentuser/node.policy.k8s.embedded.svc.json -u root/root:${EXCH_ROOTPW}
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to register with deployment policy userdev/bp_k8s_embedded_ns"
+        exit 2
+    else
+        echo -e "${PREFIX} cluster agent registered with deployment policy userdev/bp_k8s_embedded_ns, verifying agreement..."
+    fi
+
+    sleep 30
+    echo -e "kubecmd is: $kubecmd" #sudo -E microk8s.kubectl
+    checkAndWaitForActiveAgreementForPolicy "userdev/bp_k8s_embedded_ns" $AGBOT_URL "$kubecmd" $POD $AGENT_NAME_SPACE
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check agreement for userdev/bp_k8s_embedded_ns"
+        exit 2
+    fi
+
+    checkDeploymentInNamespace "$kubecmd" $OPERATOR_DEPLOYMENT_NAME $SVC_EMBEDDED_NAMESPACE
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check deployment for userdev/bp_k8s_embedded_ns"
+        exit 2
+    fi
+
+    # update policy userdev/bp_k8s_embedded_ns
+    echo -e "Updating deployment policy userdev/bp_k8s_embedded_ns to set \"clusterNamespace\": \"$NAMESPACE_IN_POLICY\""
+    $cprefix microk8s.kubectl exec ${POD} -it -n ${AGENT_NAME_SPACE} -- /usr/bin/hzn exchange business updatepolicy -f bp_k8s_embedded_ns_update.json bp_k8s_embedded_ns -u $USERDEV_ADMIN_AUTH
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to update deployment policy userdev/bp_k8s_embedded_ns"
+        exit 2
+    fi
+
+    echo -e "${PREFIX} sleep 30s to allow the cluster agent agreement to be cancelled and re-negotiated"
+    sleep 30
+    echo -e "${PREFIX} verify agreement is archived for deployment policy userdev/bp_k8s_embedded_ns"
+    checkArchivedAgreementForPolicy "userdev/bp_k8s_embedded_ns" $AGBOT_URL "$kubecmd" $POD $AGENT_NAME_SPACE
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check archived agreement for userdev/bp_k8s_embedded_ns"
+        exit 2
+    fi
+
+    echo -e "${PREFIX} verify new agreement is active for deployment policy userdev/bp_k8s_embedded_ns"
+    checkAndWaitForActiveAgreementForPolicy "userdev/bp_k8s_embedded_ns" $AGBOT_URL "$kubecmd" $POD $AGENT_NAME_SPACE
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check agreement for userdev/bp_k8s_embedded_ns"
+        exit 2
+    fi
+
+    echo -e "${PREFIX} verify the service for deployment policy userdev/bp_k8s_embedded_ns is created under namespace \"$NAMESPACE_IN_POLICY\""
+    checkDeploymentInNamespace "$kubecmd" $OPERATOR_DEPLOYMENT_NAME $NAMESPACE_IN_POLICY
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check deployment for userdev/bp_k8s_embedded_ns"
+        exit 2
+    fi
-$cprefix microk8s.kubectl exec ${POD} -it -n ${NAME_SPACE} -- env ARCH=${ARCH} /usr/bin/hzn register -f /home/agentuser/node_ui.json -p e2edev@somecomp.com/sk8s -u root/root:${EXCH_ROOTPW}
-#$cprefix microk8s.kubectl exec ${POD} -it -n ${NAME_SPACE} -- env ARCH=${ARCH} /usr/bin/hzn register -f /home/agentuser/node_ui.json --policy /home/agentuser/node_ui.json -u root/root:${EXCH_ROOTPW}
+    echo -e "${PREFIX} cluster agent successfully registered with deployment policy userdev/bp_k8s_embedded_ns, unregistering..."
+    $cprefix microk8s.kubectl exec ${POD} -it -n ${AGENT_NAME_SPACE} -- env ARCH=${ARCH} /usr/bin/hzn unregister -f
+
+    # policy name: userdev/bp_k8s
+    echo -e "${PREFIX} cluster agent registers with deployment policy userdev/bp_k8s"
+    $cprefix microk8s.kubectl exec ${POD} -it -n ${AGENT_NAME_SPACE} -- env ARCH=${ARCH} /usr/bin/hzn register -f /home/agentuser/node_ui_k8s_svc1.json --policy /home/agentuser/node.policy.k8s.svc1.json -u root/root:${EXCH_ROOTPW}
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to register with deployment policy userdev/bp_k8s"
+        exit 2
+    else
+        echo -e "${PREFIX} cluster agent registered with deployment policy userdev/bp_k8s, verifying agreement..."
+    fi
+
+    sleep 30
+    checkAndWaitForActiveAgreementForPolicy "userdev/bp_k8s" $AGBOT_URL "$kubecmd" $POD $AGENT_NAME_SPACE
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check agreement for userdev/bp_k8s"
+        exit 2
+    fi
+
+    checkDeploymentInNamespace "$kubecmd" $OPERATOR_DEPLOYMENT_NAME $AGENT_NAME_SPACE
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check deployment for userdev/bp_k8s"
+        exit 2
+    fi
+
+    # update policy userdev/bp_k8s
+    echo -e "Updating deployment policy userdev/bp_k8s to set \"clusterNamespace\": \"$NAMESPACE_IN_POLICY\""
+    $cprefix microk8s.kubectl exec ${POD} -it -n ${AGENT_NAME_SPACE} -- /usr/bin/hzn exchange business updatepolicy -f bp_k8s_update.json bp_k8s -u $USERDEV_ADMIN_AUTH
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to update deployment policy userdev/bp_k8s"
+        exit 2
+    fi
+
+    echo -e "${PREFIX} sleep 30s to allow the cluster agent agreement to be cancelled and re-negotiated"
+    sleep 30
+    echo -e "${PREFIX} verify agreement is archived for deployment policy userdev/bp_k8s"
+    checkArchivedAgreementForPolicy "userdev/bp_k8s" $AGBOT_URL "$kubecmd" $POD $AGENT_NAME_SPACE
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check archived agreement for userdev/bp_k8s"
+        exit 2
+    fi
+
+    echo -e "${PREFIX} verify new agreement is active for deployment policy userdev/bp_k8s"
+    checkAndWaitForActiveAgreementForPolicy "userdev/bp_k8s" $AGBOT_URL "$kubecmd" $POD $AGENT_NAME_SPACE
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check agreement for userdev/bp_k8s"
+        exit 2
+    fi
+
+    echo -e "${PREFIX} verify the service for deployment policy userdev/bp_k8s is created under namespace \"$NAMESPACE_IN_POLICY\""
+    checkDeploymentInNamespace "$kubecmd" $OPERATOR_DEPLOYMENT_NAME $NAMESPACE_IN_POLICY
+    if [ $? -ne 0 ]; then
+        echo -e "${PREFIX} cluster agent failed to check deployment for userdev/bp_k8s"
+        exit 2
+    fi
+
+    echo -e "${PREFIX} cluster agent successfully registered with deployment policy userdev/bp_k8s, service deployed under namespace \"$NAMESPACE_IN_POLICY\""
+
+fi
-echo "Configured agent for policy, waiting for the agbot to start."
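+# Optional debug aid, a hypothetical addition not required by the verification above: dump the
+# agent's final agreement list and the operator deployments in every namespace so a failed run is
+# easier to triage. `hzn agreement list` and `kubectl get deployments --all-namespaces` are
+# standard commands; their exact output is not asserted here.
+echo -e "${PREFIX} final agreement list on the cluster agent:"
+$cprefix microk8s.kubectl exec ${POD} -it -n ${AGENT_NAME_SPACE} -- /usr/bin/hzn agreement list
+echo -e "${PREFIX} operator deployments in all namespaces:"
+$cprefix microk8s.kubectl get deployments --all-namespaces
+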
+echo -e "${PREFIX} complete cluster agent test" \ No newline at end of file diff --git a/test/gov/service_apireg.sh b/test/gov/service_apireg.sh index 613a4ec46..307d0fd83 100755 --- a/test/gov/service_apireg.sh +++ b/test/gov/service_apireg.sh @@ -659,7 +659,7 @@ cat <$KEY_TEST_DIR/svc_k8s1.json } ], "clusterDeployment": { - "operatorYamlArchive": "/root/input_files/k8s_deploy/topservice-operator.tar.gz" + "operatorYamlArchive": "/root/input_files/k8s_deploy/topservice-operator/topservice-operator.tar.gz" }, "clusterDeploymentSignature": "" } @@ -673,6 +673,50 @@ then exit 2 fi +VERS="1.0.0" +cat <$KEY_TEST_DIR/svc_k8s_embedded_ns.json +{ + "label":"Cluster service test for ${ARCH}", + "description":"Cluster Service with Embedded ns", + "public":true, + "sharable":"multiple", + "url":"k8s-service-embedded-ns", + "version":"$VERS", + "arch":"${ARCH}", + "requiredServices":[ + ], + "userInput": [ + { + "name": "var1", + "label": "", + "type": "string" + }, + { + "name": "var2", + "label": "", + "type": "int" + }, + { + "name": "var3", + "label": "", + "type": "float" + } + ], + "clusterDeployment": { + "operatorYamlArchive": "/root/input_files/k8s_deploy/topservice-operator-with-embedded-ns/topservice-operator-with-embedded-ns.tar.gz" + }, + "clusterDeploymentSignature": "" +} +EOF + +echo -e "Register k8s-service-embedded-ns $VERS:" +hzn exchange service publish -I -u $E2EDEV_ADMIN_AUTH -o e2edev@somecomp.com -f $KEY_TEST_DIR/svc_k8s_embedded_ns.json -k $KEY_TEST_DIR/*private.key -K $KEY_TEST_DIR/*public.pem +if [ $? -ne 0 ] +then + echo -e "hzn exchange service publish failed for k8s-service-embedded-ns." + exit 2 +fi + echo -e "Listing services:" hzn exchange service list -o e2edev@somecomp.com @@ -1019,6 +1063,79 @@ echo -e "Register k8s service pattern $VERS:" results "$RES" +# k8s pattern with cluster namespace +K8SVERS="1.0.0" +read -d '' sdef <