Adding more unit tests

nikhiljindal committed Mar 20, 2018
1 parent f422948 commit 3c1ae3e
Showing 10 changed files with 138 additions and 18 deletions.
15 changes: 11 additions & 4 deletions app/kubemci/cmd/remove_clusters.go
@@ -44,8 +44,10 @@ var (

type removeClustersOptions struct {
// Name of the YAML file containing ingress spec.
// Required.
IngressFilename string
// Path to kubeconfig file.
// Required.
KubeconfigFilename string
// Names of the contexts to use from the kubeconfig file.
KubeContexts []string
@@ -59,7 +61,6 @@ type removeClustersOptions struct {
// value, then 'force' is a no-op.
ForceUpdate bool
// Name of the namespace for the ingress when none is provided (mismatch of option with spec causes an error).
// Optional.
Namespace string
}

@@ -98,7 +99,7 @@ func validateRemoveClustersArgs(options *removeClustersOptions, args []string) e
if len(args) != 1 {
return fmt.Errorf("unexpected args: %v. Expected one arg as name of load balancer.", args)
}
// Verify that the required params are not missing.
// Verify that the required options are not missing.
if options.IngressFilename == "" {
return fmt.Errorf("unexpected missing argument ingress.")
}
@@ -145,9 +146,15 @@ func runRemoveClusters(options *removeClustersOptions, args []string) error {
}

// Delete ingress resource from clusters
err = ingress.NewIngressSyncer().DeleteIngress(&ing, clients)
if err != nil {
is := ingress.NewIngressSyncer()
if is == nil {
err = multierror.Append(err, fmt.Errorf("unexpected error: ingress syncer is nil"))
// No point in proceeding.
return err
}
isErr := is.DeleteIngress(&ing, clients)
if isErr != nil {
err = multierror.Append(err, isErr)
}
return err
}
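The rewritten cleanup path above aggregates failures with hashicorp/go-multierror instead of returning at the first error. For readers unfamiliar with the pattern, here is a minimal, self-contained sketch of the same idiom; deleteAll and the step closures are hypothetical, not part of kubemci:

package main

import (
    "fmt"

    "github.com/hashicorp/go-multierror"
)

// deleteAll runs every cleanup step and aggregates failures instead of
// returning at the first one, mirroring the error handling in
// runRemoveClusters above.
func deleteAll(steps []func() error) error {
    var result error
    for _, step := range steps {
        if stepErr := step(); stepErr != nil {
            result = multierror.Append(result, stepErr)
        }
    }
    return result // nil when every step succeeded
}

func main() {
    err := deleteAll([]func() error{
        func() error { return nil },
        func() error { return fmt.Errorf("failed to delete ingress") },
    })
    fmt.Println(err) // prints the aggregated error
}

Aggregating lets every cleanup step run, so one failed deletion does not strand the remaining resources.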
1 change: 1 addition & 0 deletions app/kubemci/pkg/gcp/backendservice/backendservicesyncer.go
@@ -270,6 +270,7 @@ func (b *BackendServiceSyncer) desiredBackendService(lbName string, port ingress
}
}

// desiredBackendServiceWithoutClusters returns the desired backend service after removing the given instance groups from the given existing backend service.
func (b *BackendServiceSyncer) desiredBackendServiceWithoutClusters(existingBE *compute.BackendService, removeIGLinks []string) *compute.BackendService {
// Compute the backends to be removed.
removeBackends := desiredBackends(removeIGLinks)
@@ -217,8 +217,8 @@ func TestRemoveFromClusters(t *testing.T) {
if err != nil {
t.Fatalf("expected nil error, actual: %v", err)
}
if len(be.Backends) != 1 {
t.Fatalf("expected the backend service to have 1 backend for ig: %s, actual: %v", ig2Link, be.Backends)
if len(be.Backends) != 1 || be.Backends[0].Group != ig1Link {
t.Fatalf("expected the backend service to have 1 backend for ig: %s, actual: %v", ig1Link, be.Backends)
}

// Cleanup
22 changes: 21 additions & 1 deletion app/kubemci/pkg/gcp/backendservice/fake_backendservicesyncer.go
@@ -63,6 +63,26 @@ func (h *FakeBackendServiceSyncer) DeleteBackendServices(ports []ingressbe.Servi
}

func (h *FakeBackendServiceSyncer) RemoveFromClusters(ports []ingressbe.ServicePort, removeIGLinks []string) error {
// TODO: Implement this.
// Convert the input slices to maps for easier lookups.
affectedPorts := make(map[int64]bool, len(ports))
for _, v := range ports {
affectedPorts[v.Port] = true
}
removeLinks := make(map[string]bool, len(removeIGLinks))
for _, v := range removeIGLinks {
removeLinks[v] = true
}
for i, v := range h.EnsuredBackendServices {
if _, has := affectedPorts[v.Port.Port]; !has {
continue
}
newIGLinks := []string{}
for _, ig := range v.IGLinks {
if _, has := removeLinks[ig]; !has {
newIGLinks = append(newIGLinks, ig)
}
}
// Write back through the slice index so the updated links are retained.
h.EnsuredBackendServices[i].IGLinks = newIGLinks
}
return nil
}
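The fake's RemoveFromClusters above (and the forwarding rule fake further down) filters slices by first converting them to map-backed sets, trading a little memory for O(1) membership tests. A self-contained sketch of just that idiom, with hypothetical names:

package main

import "fmt"

// withoutLinks drops every entry of igLinks that appears in remove.
// Building a set first gives O(1) membership tests instead of a nested
// loop over both slices.
func withoutLinks(igLinks, remove []string) []string {
    removeSet := make(map[string]bool, len(remove))
    for _, r := range remove {
        removeSet[r] = true
    }
    kept := []string{}
    for _, ig := range igLinks {
        if !removeSet[ig] {
            kept = append(kept, ig)
        }
    }
    return kept
}

func main() {
    fmt.Println(withoutLinks([]string{"ig1", "ig2"}, []string{"ig2"})) // [ig1]
}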
@@ -64,6 +64,5 @@ func (h *FakeFirewallRuleSyncer) RemoveFromClusters(lbName string, removeIGLinks
}
v.IGLinks = newIGLinks
}
h.EnsuredFirewallRules = nil
return nil
}
5 changes: 3 additions & 2 deletions app/kubemci/pkg/gcp/firewallrule/firewallrulesyncer.go
@@ -93,7 +93,7 @@ func (s *FirewallRuleSyncer) RemoveFromClusters(lbName string, removeIGLinks map
glog.V(5).Infof("Received instance groups: %v", removeIGLinks)
err := s.removeFromClusters(lbName, removeIGLinks)
if err != nil {
return fmt.Errorf("Error %s in removing clusters from firewall rule", err)
return fmt.Errorf("Error in removing clusters from firewall rule: %s", err)
}
return nil
}
@@ -254,7 +254,8 @@ func (s *FirewallRuleSyncer) desiredFirewallRuleWithoutClusters(existingFW *comp
// rather than removing the ones for old clusters.
// But we do not want to change existing tags for clusters that are already working.
// Ideally network tags should only be appended to, so this should not be a problem.
// TODO(nikhiljindal): Fix this if it becomes a problem.
// TODO(nikhiljindal): Fix this if it becomes a problem:
// https://github.com/GoogleCloudPlatform/k8s-multicluster-ingress/issues/147
targetTags, err := s.getTargetTags(instances)
if err != nil {
return nil, err
14 changes: 13 additions & 1 deletion app/kubemci/pkg/gcp/forwardingrule/fake_forwardingrulesyncer.go
@@ -94,6 +94,18 @@ func (f *FakeForwardingRuleSyncer) ListLoadBalancerStatuses() ([]status.LoadBala
}

func (f *FakeForwardingRuleSyncer) RemoveClustersFromStatus(clusters []string) error {
// TODO: Implement this.
removeClusters := make(map[string]bool, len(clusters))
for _, c := range clusters {
removeClusters[c] = true
}
for i, fr := range f.EnsuredForwardingRules {
newClusters := []string{}
for _, c := range fr.Clusters {
if _, has := removeClusters[c]; !has {
newClusters = append(newClusters, c)
}
}
f.EnsuredForwardingRules[i].Clusters = newClusters
}
return nil
}
4 changes: 2 additions & 2 deletions app/kubemci/pkg/gcp/forwardingrule/forwardingrulesyncer.go
@@ -207,7 +207,7 @@ func (s *ForwardingRuleSyncer) ListLoadBalancerStatuses() ([]status.LoadBalancer
// See interface for comment.
func (s *ForwardingRuleSyncer) RemoveClustersFromStatus(clusters []string) error {
// Update HTTPS status first and then HTTP.
// This ensures that get-status returns the correct status even if HTTPS is updated but updating HTTP fails.
// This ensures that get-status returns the old status until both HTTPS and HTTP forwarding rules are updated.
// This is because get-status reads from HTTP if it exists.
// Also note that it continues silently if either HTTP or HTTPS forwarding rules do not exist.
if err := s.removeClustersFromStatus("https", s.namer.HttpsForwardingRuleName(), clusters); err != nil {
@@ -216,7 +216,7 @@ func (s *ForwardingRuleSyncer) RemoveClustersFromStatus(clusters []string) error
return s.removeClustersFromStatus("http", s.namer.HttpForwardingRuleName(), clusters)
}

// ensureForwardingRule ensures a forwarding rule exists as per the given input parameters.
// removeClustersFromStatus removes the given list of clusters from the existing forwarding rule with the given name.
func (s *ForwardingRuleSyncer) removeClustersFromStatus(httpProtocol, name string, clusters []string) error {
fmt.Println("Removing clusters", clusters, "from", httpProtocol, "forwarding rule")
existingFR, err := s.frp.GetGlobalForwardingRule(name)
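The updated comments in RemoveClustersFromStatus describe an ordering invariant: get-status prefers the HTTP forwarding rule's status, so HTTPS is written first and readers keep seeing the old status until both writes land. A small illustration of that read preference; statusFor is a hypothetical helper, not the repository's API:

package main

import "fmt"

// statusFor mirrors the read preference described above: a reader takes
// the HTTP rule's status when that rule exists and falls back to HTTPS
// otherwise. Updating HTTPS first therefore leaves readers on the old
// HTTP status until both rules are updated.
func statusFor(httpStatus, httpsStatus string) string {
    if httpStatus != "" {
        return httpStatus
    }
    return httpsStatus
}

func main() {
    // HTTPS already updated, HTTP not yet: readers still see the old status.
    fmt.Println(statusFor("clusters: [cluster1 cluster2]", "clusters: [cluster2]"))
}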
8 changes: 5 additions & 3 deletions app/kubemci/pkg/gcp/loadbalancer/loadbalancersyncer.go
@@ -135,7 +135,7 @@ func (l *LoadBalancerSyncer) CreateLoadBalancer(ing *v1beta1.Ingress, forceUpdat
if portsErr != nil {
err = multierror.Append(err, portsErr)
}
// Can not really create any backend service without named portsand instance groups. No point continuing.
// Can not really create any backend service without named ports and instance groups. No point continuing.
if len(igs) == 0 {
err = multierror.Append(err, fmt.Errorf("No instance group found. Can not continue"))
return err
@@ -217,8 +217,8 @@ func (l *LoadBalancerSyncer) CreateLoadBalancer(ing *v1beta1.Ingress, forceUpdat
}

// DeleteLoadBalancer deletes the GCP resources associated with the L7 GCP load balancer for the given ingress.
// TODO(nikhiljindal): Do not require the ingress yaml from users. Just the name should be enough. We can fetch ingress YAML from one of the clusters.
func (l *LoadBalancerSyncer) DeleteLoadBalancer(ing *v1beta1.Ingress) error {
// TODO(nikhiljindal): Dont require the ingress yaml from users. Just the name should be enough. We can fetch ingress YAML from one of the clusters.
client, cErr := getAnyClient(l.clients)
if cErr != nil {
// No point in continuing without a client.
@@ -262,8 +262,8 @@ func (l *LoadBalancerSyncer) DeleteLoadBalancer(ing *v1beta1.Ingress) error {
}

// RemoveFromClusters removes the given clusters from the L7 GCP load balancer for the given ingress.
// TODO(nikhiljindal): Do not require the ingress yaml from users. Just the name should be enough. We can fetch ingress YAML from one of the clusters.
func (l *LoadBalancerSyncer) RemoveFromClusters(ing *v1beta1.Ingress, removeClients map[string]kubeclient.Interface, forceUpdate bool) error {
// TODO(nikhiljindal): Dont require the ingress yaml from users. Just the name should be enough. We can fetch ingress YAML from one of the clusters.
client, cErr := getAnyClient(l.clients)
if cErr != nil {
// No point in continuing without a client.
@@ -287,6 +287,8 @@ func (l *LoadBalancerSyncer) RemoveFromClusters(ing *v1beta1.Ingress, removeClie
// Aggregate errors and return all at the end.
err = multierror.Append(err, fwErr)
}

// Convert the map of instance groups to a flat array.
igsForBE := []string{}
for k := range removeIGLinks {
igsForBE = append(igsForBE, removeIGLinks[k]...)
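The new comment marks the usual Go idiom for collapsing a map of per-cluster instance group links into one slice. A minimal sketch with hypothetical names:

package main

import "fmt"

// flatten collapses per-cluster instance group links into a single slice,
// the same step the new comment in RemoveFromClusters describes.
func flatten(igLinks map[string][]string) []string {
    out := []string{}
    for _, links := range igLinks {
        out = append(out, links...)
    }
    return out
}

func main() {
    fmt.Println(flatten(map[string][]string{
        "cluster1": {"ig1"},
        "cluster2": {"ig2"},
    }))
}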
82 changes: 80 additions & 2 deletions app/kubemci/pkg/gcp/loadbalancer/loadbalancersyncer_test.go
@@ -80,7 +80,7 @@ func TestCreateLoadBalancer(t *testing.T) {
lbc.ipp.ReserveGlobalAddress(ipAddress)
ing, err := setupLBCForCreateIng(lbc, nodePort, igName, igZone, zoneLink, ipAddress)
if err != nil {
t.Fatalf("%s", err)
t.Fatalf("unexpected error in test setup: %s", err)
}
if err := lbc.CreateLoadBalancer(ing, true /*forceUpdate*/, clusters); err != nil {
t.Fatalf("unexpected error %s", err)
@@ -294,7 +294,7 @@ func TestDeleteLoadBalancer(t *testing.T) {
lbc.ipp.ReserveGlobalAddress(ipAddress)
ing, err := setupLBCForCreateIng(lbc, nodePort, igName, igZone, zoneLink, ipAddress)
if err != nil {
t.Fatalf("%s", err)
t.Fatalf("unexpected error in test setup: %s", err)
}
if err := lbc.CreateLoadBalancer(ing, true /*forceUpdate*/, clusters); err != nil {
t.Fatalf("unexpected error %s", err)
@@ -347,6 +347,84 @@ func TestDeleteLoadBalancer(t *testing.T) {
}
}

func TestRemoveFromClusters(t *testing.T) {
lbName := "lb-name"
nodePort := int64(32211)
igName := "my-fake-ig"
igZone := "my-fake-zone"
zoneLink := fmt.Sprintf("https://www.googleapis.com/compute/v1/projects/fake-project/zones/%s", igZone)
clusters := []string{"cluster1", "cluster2"}
lbc := newLoadBalancerSyncer(lbName)
ipAddress := &compute.Address{
Name: "ipAddressName",
Address: "1.2.3.4",
}
// Reserve a global address. User is supposed to do this before calling CreateLoadBalancer.
lbc.ipp.ReserveGlobalAddress(ipAddress)
ing, err := setupLBCForCreateIng(lbc, nodePort, igName, igZone, zoneLink, ipAddress)
if err != nil {
t.Fatalf("unexpected error in test setup: %s", err)
}
if err := lbc.CreateLoadBalancer(ing, true /*forceUpdate*/, clusters); err != nil {
t.Fatalf("unexpected error %s", err)
}
fhc := lbc.hcs.(*healthcheck.FakeHealthCheckSyncer)
if len(fhc.EnsuredHealthChecks) == 0 {
t.Errorf("unexpected: health checks not created")
}
fbs := lbc.bss.(*backendservice.FakeBackendServiceSyncer)
if len(fbs.EnsuredBackendServices) == 0 {
t.Errorf("unexpected: backend services not created")
}
fum := lbc.ums.(*urlmap.FakeURLMapSyncer)
if len(fum.EnsuredURLMaps) == 0 {
t.Errorf("unexpected: url maps not created")
}
ftp := lbc.tps.(*targetproxy.FakeTargetProxySyncer)
if len(ftp.EnsuredTargetProxies) == 0 {
t.Errorf("unexpected: target proxies not created")
}
ffr := lbc.frs.(*forwardingrule.FakeForwardingRuleSyncer)
if len(ffr.EnsuredForwardingRules) == 0 {
t.Errorf("unexpected: forwarding rules not created")
}
ffw := lbc.fws.(*firewallrule.FakeFirewallRuleSyncer)
if len(ffw.EnsuredFirewallRules) == 0 {
t.Errorf("unexpected: firewall rules not created")
}
// Verify that the load balancer is spread to both clusters.
if err := verifyClusters(lbName, lbc.frs, clusters); err != nil {
t.Errorf("%s", err)
}

// Remove the load balancer from the first cluster and verify that the load balancer status is updated appropriately.
removeClients := map[string]kubeclient.Interface{
"cluster1": lbc.clients["cluster1"],
}
if err := lbc.RemoveFromClusters(ing, removeClients, false /*forceUpdate*/); err != nil {
t.Errorf("unexpected error in removing existing load balancer from clusters: %s", err)
}
if err := verifyClusters(lbName, lbc.frs, []string{"cluster2"}); err != nil {
t.Errorf("%s", err)
}

// Cleanup.
if err := lbc.DeleteLoadBalancer(ing); err != nil {
t.Fatalf("unexpected error in deleting load balancer: %s", err)
}
}

func verifyClusters(lbName string, frs forwardingrule.ForwardingRuleSyncerInterface, expectedClusters []string) error {
status, err := frs.GetLoadBalancerStatus(lbName)
if err != nil {
return fmt.Errorf("unexpected error in getting load balancer status: %s", err)
}
if !reflect.DeepEqual(status.Clusters, expectedClusters) {
return fmt.Errorf("unexpected list of clusters, expected: %v, got: %v", expectedClusters, status.Clusters)
}
return nil
}
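One subtlety in verifyClusters: reflect.DeepEqual on slices is order-sensitive, so the assertion passes only because the fakes preserve the original cluster order when filtering. A quick illustration:

package main

import (
    "fmt"
    "reflect"
)

func main() {
    fmt.Println(reflect.DeepEqual([]string{"cluster2"}, []string{"cluster2"})) // true
    fmt.Println(reflect.DeepEqual([]string{"a", "b"}, []string{"b", "a"}))     // false: same elements, different order
}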

func TestFormatLoadBalancersList(t *testing.T) {
testCases := []struct {
statuses []status.LoadBalancerStatus