diff --git a/bidengine/order.go b/bidengine/order.go index 3331c376..95cbfa2c 100644 --- a/bidengine/order.go +++ b/bidengine/order.go @@ -507,7 +507,7 @@ func (o *order) shouldBid(group *dtypes.Group) (bool, error) { return false, nil } - for _, resources := range group.GroupSpec.GetResources() { + for _, resources := range group.GroupSpec.GetResourceUnits() { if len(resources.Resources.Storage) > o.cfg.MaxGroupVolumes { o.log.Info(fmt.Sprintf("unable to fulfill: group volumes count exceeds (%d > %d)", len(resources.Resources.Storage), o.cfg.MaxGroupVolumes)) return false, nil diff --git a/bidengine/order_test.go b/bidengine/order_test.go index ce5f6ecf..4777fe99 100644 --- a/bidengine/order_test.go +++ b/bidengine/order_test.go @@ -60,7 +60,7 @@ var _ BidPricingStrategy = (*alwaysFailsBidPricingStrategy)(nil) func makeMocks(s *orderTestScaffold) { groupResult := &dtypes.QueryGroupResponse{} groupResult.Group.GroupSpec.Name = "testGroupName" - groupResult.Group.GroupSpec.Resources = make([]dtypes.Resource, 1) + groupResult.Group.GroupSpec.Resources = make(dtypes.ResourceUnits, 1) cpu := atypes.CPU{} cpu.Units = atypes.NewResourceValue(uint64(dtypes.GetValidationConfig().MinUnitCPU)) @@ -77,14 +77,15 @@ func makeMocks(s *orderTestScaffold) { }, } - clusterResources := atypes.ResourceUnits{ + clusterResources := atypes.Resources{ + ID: 1, CPU: &cpu, GPU: &gpu, Memory: &memory, Storage: storage, } price := sdk.NewInt64DecCoin(testutil.CoinDenom, 23) - resource := dtypes.Resource{ + resource := dtypes.ResourceUnit{ Resources: clusterResources, Count: 2, Price: price, diff --git a/bidengine/pricing_test.go b/bidengine/pricing_test.go index e947c66f..152c75c9 100644 --- a/bidengine/pricing_test.go +++ b/bidengine/pricing_test.go @@ -61,7 +61,7 @@ func defaultGroupSpecCPUMem() *dtypes.GroupSpec { gspec := &dtypes.GroupSpec{ Name: "", Requirements: atypes.PlacementRequirements{}, - Resources: make([]dtypes.Resource, 1), + Resources: make(dtypes.ResourceUnits, 1), } cpu := 
atypes.CPU{} @@ -70,13 +70,13 @@ func defaultGroupSpecCPUMem() *dtypes.GroupSpec { memory := atypes.Memory{} memory.Quantity = atypes.NewResourceValue(10000) - clusterResources := atypes.ResourceUnits{ + clusterResources := atypes.Resources{ CPU: &cpu, Memory: &memory, } price := sdk.NewDecCoin("uakt", sdk.NewInt(23)) - resource := dtypes.Resource{ + resource := dtypes.ResourceUnit{ Resources: clusterResources, Count: 1, Price: price, @@ -91,10 +91,10 @@ func defaultGroupSpec() *dtypes.GroupSpec { gspec := &dtypes.GroupSpec{ Name: "", Requirements: atypes.PlacementRequirements{}, - Resources: make([]dtypes.Resource, 1), + Resources: make(dtypes.ResourceUnits, 1), } - clusterResources := atypes.ResourceUnits{ + clusterResources := atypes.Resources{ CPU: &atypes.CPU{ Units: atypes.NewResourceValue(11), }, @@ -111,7 +111,7 @@ func defaultGroupSpec() *dtypes.GroupSpec { }, } price := sdk.NewDecCoin(testutil.CoinDenom, sdk.NewInt(23)) - resource := dtypes.Resource{ + resource := dtypes.ResourceUnit{ Resources: clusterResources, Count: 1, Price: price, diff --git a/cluster/client.go b/cluster/client.go index da72972a..8f10a6d5 100644 --- a/cluster/client.go +++ b/cluster/client.go @@ -172,8 +172,8 @@ type inventory struct { var _ ctypes.Inventory = (*inventory)(nil) func (inv *inventory) Adjust(reservation ctypes.ReservationGroup, opts ...ctypes.InventoryOption) error { - resources := make([]types.Resources, len(reservation.Resources().GetResources())) - copy(resources, reservation.Resources().GetResources()) + resources := make(dtypes.ResourceUnits, len(reservation.Resources().GetResourceUnits())) + copy(resources, reservation.Resources().GetResourceUnits()) currInventory := inv.dup() diff --git a/cluster/inventory.go b/cluster/inventory.go index ba562013..9eddc34d 100644 --- a/cluster/inventory.go +++ b/cluster/inventory.go @@ -138,7 +138,7 @@ func (is *inventoryService) ready() <-chan struct{} { return is.readych } -func (is *inventoryService) lookup(order 
mtypes.OrderID, resources atypes.ResourceGroup) (ctypes.Reservation, error) { +func (is *inventoryService) lookup(order mtypes.OrderID, resources dtypes.ResourceGroup) (ctypes.Reservation, error) { ch := make(chan inventoryResponse, 1) req := inventoryRequest{ order: order, @@ -155,15 +155,15 @@ func (is *inventoryService) lookup(order mtypes.OrderID, resources atypes.Resour } } -func (is *inventoryService) reserve(order mtypes.OrderID, resources atypes.ResourceGroup) (ctypes.Reservation, error) { - for idx, res := range resources.GetResources() { - if res.Resources.CPU == nil { +func (is *inventoryService) reserve(order mtypes.OrderID, resources dtypes.ResourceGroup) (ctypes.Reservation, error) { + for idx, res := range resources.GetResourceUnits() { + if res.CPU == nil { return nil, fmt.Errorf("%w: CPU resource at idx %d is nil", ErrInvalidResource, idx) } - if res.Resources.GPU == nil { + if res.GPU == nil { return nil, fmt.Errorf("%w: GPU resource at idx %d is nil", ErrInvalidResource, idx) } - if res.Resources.Memory == nil { + if res.Memory == nil { return nil, fmt.Errorf("%w: Memory resource at idx %d is nil", ErrInvalidResource, idx) } } @@ -231,7 +231,7 @@ func (is *inventoryService) status(ctx context.Context) (ctypes.InventoryStatus, type inventoryRequest struct { order mtypes.OrderID - resources atypes.ResourceGroup + resources dtypes.ResourceGroup ch chan<- inventoryResponse } @@ -240,11 +240,12 @@ type inventoryResponse struct { err error } -func (is *inventoryService) resourcesToCommit(rgroup atypes.ResourceGroup) atypes.ResourceGroup { - replacedResources := make([]dtypes.Resource, 0) +func (is *inventoryService) resourcesToCommit(rgroup dtypes.ResourceGroup) dtypes.ResourceGroup { + replacedResources := make(dtypes.ResourceUnits, 0) - for _, resource := range rgroup.GetResources() { - runits := atypes.ResourceUnits{ + for _, resource := range rgroup.GetResourceUnits() { + runits := atypes.Resources{ + ID: resource.ID, CPU: &atypes.CPU{ Units: 
sdlutil.ComputeCommittedResources(is.config.CPUCommitLevel, resource.Resources.GetCPU().GetUnits()), Attributes: resource.Resources.GetCPU().GetAttributes(), @@ -272,7 +273,7 @@ func (is *inventoryService) resourcesToCommit(rgroup atypes.ResourceGroup) atype runits.Storage = storage - v := dtypes.Resource{ + v := dtypes.ResourceUnit{ Resources: runits, Count: resource.Count, Price: sdk.DecCoin{}, @@ -342,7 +343,7 @@ func updateReservationMetrics(reservations []*reservation) { memoryTotal = &activeMemoryTotal endpointsTotal = &activeEndpointsTotal } - for _, resource := range reservation.Resources().GetResources() { + for _, resource := range reservation.Resources().GetResourceUnits() { *cpuTotal += float64(resource.Resources.GetCPU().GetUnits().Value() * uint64(resource.Count)) *gpuTotal += float64(resource.Resources.GetGPU().GetUnits().Value() * uint64(resource.Count)) *memoryTotal += float64(resource.Resources.GetMemory().Quantity.Value() * uint64(resource.Count)) @@ -389,7 +390,7 @@ func (is *inventoryService) handleRequest(req inventoryRequest, state *inventory reservation := newReservation(req.order, resourcesToCommit) { - jReservation, _ := json.Marshal(req.resources.GetResources()) + jReservation, _ := json.Marshal(req.resources.GetResourceUnits()) is.log.Debug("reservation requested", "order", req.order, fmt.Sprintf("resources=%s", jReservation)) } @@ -727,7 +728,7 @@ func (is *inventoryService) getStatus(state *inventoryServiceState) ctypes.Inven Storage: make(map[string]int64), } - for _, resources := range reservation.Resources().GetResources() { + for _, resources := range reservation.Resources().GetResourceUnits() { total.AddResources(resources) } @@ -751,7 +752,7 @@ func (is *inventoryService) getStatus(state *inventoryServiceState) ctypes.Inven func reservationCountEndpoints(reservation *reservation) uint { var externalPortCount uint - resources := reservation.Resources().GetResources() + resources := reservation.Resources().GetResourceUnits() // 
Count the number of endpoints per resource. The number of instances does not affect // the number of ports for _, resource := range resources { diff --git a/cluster/inventory_test.go b/cluster/inventory_test.go index 41c3495c..5e618f65 100644 --- a/cluster/inventory_test.go +++ b/cluster/inventory_test.go @@ -67,10 +67,11 @@ func newInventory(nodes ...string) ctypes.Inventory { } func TestInventory_reservationAllocatable(t *testing.T) { - mkrg := func(cpu uint64, gpu uint64, memory uint64, storage uint64, endpointsCount uint, count uint32) dtypes.Resource { + mkrg := func(cpu uint64, gpu uint64, memory uint64, storage uint64, endpointsCount uint, count uint32) dtypes.ResourceUnit { endpoints := make([]types.Endpoint, endpointsCount) - return dtypes.Resource{ - Resources: types.ResourceUnits{ + return dtypes.ResourceUnit{ + Resources: types.Resources{ + ID: 1, CPU: &types.CPU{ Units: types.NewResourceValue(cpu), }, @@ -91,7 +92,7 @@ func TestInventory_reservationAllocatable(t *testing.T) { } } - mkres := func(allocated bool, res ...dtypes.Resource) *reservation { + mkres := func(allocated bool, res ...dtypes.ResourceUnit) *reservation { return &reservation{ allocated: allocated, resources: &dtypes.GroupSpec{Resources: res}, @@ -189,7 +190,8 @@ func TestInventory_ClusterDeploymentDeployed(t *testing.T) { groupServices[0] = manifest.Service{ Count: 1, - Resources: types.ResourceUnits{ + Resources: types.Resources{ + ID: 1, CPU: &types.CPU{ Units: types.NewResourceValue(1), }, @@ -327,7 +329,8 @@ func makeInventoryScaffold(t *testing.T, leaseQty uint, inventoryCall bool, node } } - deploymentRequirements := types.ResourceUnits{ + deploymentRequirements := types.Resources{ + ID: 1, CPU: &types.CPU{ Units: types.NewResourceValue(4000), }, @@ -397,7 +400,8 @@ func makeGroupForInventoryTest(sharedHTTP, nodePort, leasedIP bool) manifest.Gro serviceEndpoints = append(serviceEndpoints, serviceEndpoint) } - deploymentRequirements := types.ResourceUnits{ + 
deploymentRequirements := types.Resources{ + ID: 1, CPU: &types.CPU{ Units: types.NewResourceValue(4000), }, @@ -619,7 +623,7 @@ func TestInventory_OverReservations(t *testing.T) { } } - deploymentRequirements := types.ResourceUnits{ + deploymentRequirements := types.Resources{ CPU: &types.CPU{ Units: types.NewResourceValue(4000), }, diff --git a/cluster/kube/apply.go b/cluster/kube/apply.go index def2b902..7e72e596 100644 --- a/cluster/kube/apply.go +++ b/cluster/kube/apply.go @@ -12,7 +12,6 @@ import ( metricsutils "github.com/akash-network/node/util/metrics" "github.com/akash-network/provider/cluster/kube/builder" - crd "github.com/akash-network/provider/pkg/apis/akash.network/v2beta2" crdapi "github.com/akash-network/provider/pkg/client/clientset/versioned" ) @@ -154,7 +153,7 @@ func applyService(ctx context.Context, kc kubernetes.Interface, b builder.Servic return err } -func applyManifest(ctx context.Context, kc crdapi.Interface, b builder.Manifest) (*crd.Manifest, error) { +func applyManifest(ctx context.Context, kc crdapi.Interface, b builder.Manifest) error { obj, err := kc.AkashV2beta2().Manifests(b.NS()).Get(ctx, b.Name(), metav1.GetOptions{}) metricsutils.IncCounterVecWithLabelValuesFiltered(kubeCallsCounter, "akash-manifests-get", err, errors.IsNotFound) @@ -175,5 +174,5 @@ func applyManifest(ctx context.Context, kc crdapi.Interface, b builder.Manifest) } } - return obj, err + return err } diff --git a/cluster/kube/builder/builder.go b/cluster/kube/builder/builder.go index 915563ea..e970b8ed 100644 --- a/cluster/kube/builder/builder.go +++ b/cluster/kube/builder/builder.go @@ -1,6 +1,7 @@ package builder import ( + "errors" "fmt" "reflect" "strconv" @@ -49,6 +50,10 @@ const ( envVarAkashClusterPublicHostname = "AKASH_CLUSTER_PUBLIC_HOSTNAME" ) +var ( + ErrKubeBuilder = errors.New("kube-builder") +) + var ( dnsPort = intstr.FromInt(53) udpProtocol = corev1.Protocol("UDP") @@ -73,7 +78,8 @@ func ClusterDeploymentFromDeployment(d ctypes.IDeployment) 
(IClusterDeployment, cparams, valid := d.ClusterParams().(crd.ClusterSettings) if !valid { // nolint: goerr113 - return nil, fmt.Errorf("kube-cluster: unexpected type from ClusterParams(). expected (%s), actual (%s)", + return nil, fmt.Errorf("%w: unexpected type from ClusterParams(). expected (%s), actual (%s)", + ErrKubeBuilder, reflect.TypeOf(crd.ClusterSettings{}), reflect.TypeOf(d.ClusterParams()), ) @@ -85,6 +91,10 @@ func ClusterDeploymentFromDeployment(d ctypes.IDeployment) (IClusterDeployment, Sparams: cparams, } + if err := cd.validate(); err != nil { + return cd, err + } + return cd, nil } @@ -100,6 +110,42 @@ func (d *ClusterDeployment) ClusterParams() crd.ClusterSettings { return d.Sparams } +func (d *ClusterDeployment) validate() error { + if len(d.Group.Services) != len(d.Sparams.SchedulerParams) { + return fmt.Errorf("%w: group services count does not match scheduler params count (%d) != (%d)", + ErrKubeBuilder, + len(d.Group.Services), + len(d.Sparams.SchedulerParams)) + } + + for idx := range d.Group.Services { + svc := d.Group.Services[idx] + sParams := d.Sparams.SchedulerParams[idx] + + if svc.Resources.CPU == nil { + return fmt.Errorf("%w: service %s. resource CPU cannot be nil", ErrKubeBuilder, svc.Name) + } + + if svc.Resources.GPU == nil { + return fmt.Errorf("%w: service %s. resource GPU cannot be nil", ErrKubeBuilder, svc.Name) + } + + if svc.Resources.Memory == nil { + return fmt.Errorf("%w: service %s. resource Memory cannot be nil", ErrKubeBuilder, svc.Name) + } + + if svc.Resources.GPU.Units.Value() > 0 { + if sParams == nil || + sParams.Resources == nil || + sParams.Resources.GPU == nil { + return fmt.Errorf("%w: service %s. 
SchedulerParams.Resources.GPU must not be nil when GPU > 0", ErrKubeBuilder, svc.Name) + } + } + } + + return nil +} + type builderBase interface { NS() string Name() string diff --git a/cluster/kube/builder/netpol.go b/cluster/kube/builder/netpol.go index b62bbac8..d0598941 100644 --- a/cluster/kube/builder/netpol.go +++ b/cluster/kube/builder/netpol.go @@ -9,7 +9,6 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" manitypes "github.com/akash-network/akash-api/go/manifest/v2beta2" - sdlutil "github.com/akash-network/node/sdl/util" ) type NetPol interface { @@ -165,7 +164,7 @@ func (b *netPol) Create() ([]*netv1.NetworkPolicy, error) { // nolint:golint,unp portsWithIP = append(portsWithIP, entry) } - if !expose.Global || sdlutil.ShouldBeIngress(expose) { + if !expose.Global || expose.IsIngress() { continue } diff --git a/cluster/kube/builder/service.go b/cluster/kube/builder/service.go index e641281c..fb8009de 100644 --- a/cluster/kube/builder/service.go +++ b/cluster/kube/builder/service.go @@ -9,7 +9,6 @@ import ( "k8s.io/apimachinery/pkg/util/intstr" manitypes "github.com/akash-network/akash-api/go/manifest/v2beta2" - sdlutil "github.com/akash-network/node/sdl/util" ) type Service interface { @@ -109,12 +108,11 @@ func (b *service) Any() bool { service := &b.deployment.ManifestGroup().Services[b.serviceIdx] for _, expose := range service.Expose { - exposeIsIngress := sdlutil.ShouldBeIngress(expose) - if b.requireNodePort && exposeIsIngress { + if b.requireNodePort && expose.IsIngress() { continue } - if !b.requireNodePort && exposeIsIngress { + if !b.requireNodePort && expose.IsIngress() { return true } @@ -134,9 +132,8 @@ func (b *service) ports() ([]corev1.ServicePort, error) { ports := make([]corev1.ServicePort, 0, len(service.Expose)) portsAdded := make(map[int32]struct{}) for i, expose := range service.Expose { - shouldBeIngress := sdlutil.ShouldBeIngress(expose) - if expose.Global == b.requireNodePort || (!b.requireNodePort && shouldBeIngress) { - if 
b.requireNodePort && shouldBeIngress { + if expose.Global == b.requireNodePort || (!b.requireNodePort && expose.IsIngress()) { + if b.requireNodePort && expose.IsIngress() { continue } @@ -149,7 +146,7 @@ func (b *service) ports() ([]corev1.ServicePort, error) { default: return nil, errUnsupportedProtocol } - externalPort := sdlutil.ExposeExternalPort(service.Expose[i]) + externalPort := expose.GetExternalPort() _, added := portsAdded[externalPort] if !added { portsAdded[externalPort] = struct{}{} diff --git a/cluster/kube/client.go b/cluster/kube/client.go index 88691e7b..4bdcd18a 100644 --- a/cluster/kube/client.go +++ b/cluster/kube/client.go @@ -7,13 +7,13 @@ import ( "io" "strings" - "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" "github.com/tendermint/tendermint/libs/log" appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" eventsv1 "k8s.io/api/events/v1" + kerrors "k8s.io/apimachinery/pkg/api/errors" kubeErrors "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/version" @@ -27,7 +27,6 @@ import ( dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3" "github.com/akash-network/node/sdl" - sdlutil "github.com/akash-network/node/sdl/util" metricsutils "github.com/akash-network/node/util/metrics" "github.com/akash-network/provider/cluster" @@ -82,28 +81,28 @@ func wrapKubeCall[T any](label string, fn func() (T, error)) (T, error) { func NewClient(ctx context.Context, log log.Logger, ns string, configPath string) (Client, error) { config, err := clientcommon.OpenKubeConfig(configPath, log) if err != nil { - return nil, errors.Wrap(err, "kube: error building config flags") + return nil, fmt.Errorf("kube: error building config flags: %w", err) } config.RateLimiter = flowcontrol.NewFakeAlwaysRateLimiter() kc, err := 
kubernetes.NewForConfig(config) if err != nil { - return nil, errors.Wrap(err, "kube: error creating kubernetes client") + return nil, fmt.Errorf("kube: error creating kubernetes client: %w", err) } _, err = kc.CoreV1().Namespaces().Get(ctx, ns, metav1.GetOptions{}) if err != nil { - return nil, errors.Wrap(err, "kube: unable to fetch leases namespace") + return nil, fmt.Errorf("kube: unable to fetch leases namespace: %w", err) } mc, err := akashclient.NewForConfig(config) if err != nil { - return nil, errors.Wrap(err, "kube: error creating manifest client") + return nil, fmt.Errorf("kube: error creating manifest client: %w", err) } metc, err := metricsclient.NewForConfig(config) if err != nil { - return nil, errors.Wrap(err, "kube: error creating metrics client") + return nil, fmt.Errorf("kube: error creating metrics client: %w", err) } return &client{ @@ -191,63 +190,89 @@ func (c *client) Deployments(ctx context.Context) ([]ctypes.IDeployment, error) return deployments, nil } -func (c *client) Deploy(ctx context.Context, deployment ctypes.IDeployment) error { - cdeployment, err := builder.ClusterDeploymentFromDeployment(deployment) - if err != nil { - return err - } +type deploymentService struct { + deployment builder.Deployment + statefulSet builder.StatefulSet + localService builder.Service + globalService builder.Service +} - lid := cdeployment.LeaseID() - group := cdeployment.ManifestGroup() +type deploymentApplies struct { + ns builder.NS + netPol builder.NetPol + cmanifest builder.Manifest + services []deploymentService +} - settingsI := ctx.Value(builder.SettingsKey) - if nil == settingsI { - return kubeclienterrors.ErrNotConfiguredWithSettings - } - settings := settingsI.(builder.Settings) - if err := builder.ValidateSettings(settings); err != nil { - return err +func (c *client) Deploy(ctx context.Context, deployment ctypes.IDeployment) (err error) { + var settings builder.Settings + var valid bool + + if settings, valid = 
ctx.Value(builder.SettingsKey).(builder.Settings); !valid { + err = kubeclienterrors.ErrNotConfiguredWithSettings + return } - if err := applyNS(ctx, c.kc, builder.BuildNS(settings, cdeployment)); err != nil { - c.log.Error("applying namespace", "err", err, "lease", lid) - return err + if err = builder.ValidateSettings(settings); err != nil { + return } - if err := applyNetPolicies(ctx, c.kc, builder.BuildNetPol(settings, cdeployment)); err != nil { // - c.log.Error("applying namespace network policies", "err", err, "lease", lid) - return err + var cdeployment builder.IClusterDeployment + + if cdeployment, err = builder.ClusterDeploymentFromDeployment(deployment); err != nil { + if cdeployment != nil { + tMani := builder.BuildManifest(c.log, settings, c.ns, cdeployment) + + if cr, er := tMani.Create(); er == nil { + data, _ := json.Marshal(cr) + c.log.Error(fmt.Sprintf("debug manifest %s", string(data))) + } + } + + return } - cmanifest := builder.BuildManifest(c.log, settings, c.ns, cdeployment) - crdManifest, err := applyManifest(ctx, c.ac, cmanifest) - if err != nil { - c.log.Error("applying manifest", "err", err, "lease", lid) - return err + lid := cdeployment.LeaseID() + group := cdeployment.ManifestGroup() + + applies := deploymentApplies{ + services: make([]deploymentService, 0, len(group.Services)), } - // log actual stored manifest if deployment fails defer func() { - if err != nil { - jData, err := json.Marshal(crdManifest) - if err == nil { - c.log.Error("while deploying", "err", err, fmt.Sprintf("manifest=%s", string(jData))) - } else { - c.log.Error("dump manifest crd while deploying", "err", err) + tmpErr := err + + if r := recover(); r != nil { + c.log.Error(fmt.Sprintf("recovered from panic: %v", r)) + err = kubeclienterrors.ErrInternalError + } + + if tmpErr != nil || err != nil { + var dArgs []any + var dMsg string + + applyMsgLog := func(msg string, arg any) { + dMsg += msg + dArgs = append(dArgs, arg) } + + applyMsgLog("unable to deploy lid=%s. 
last known state:\n", lid) + + c.log.Error(fmt.Sprintf(dMsg, dArgs...)) } }() - if err = cleanupStaleResources(ctx, c.kc, lid, group); err != nil { - c.log.Error("cleaning stale resources", "err", err, "lease", lid) - return err - } + applies.ns = builder.BuildNS(settings, cdeployment) + applies.netPol = builder.BuildNetPol(settings, cdeployment) + applies.cmanifest = builder.BuildManifest(c.log, settings, c.ns, cdeployment) for svcIdx := range group.Services { workload := builder.NewWorkloadBuilder(c.log, settings, cdeployment, svcIdx) service := &group.Services[svcIdx] + svc := deploymentService{} + persistent := false for i := range service.Resources.Storage { attrVal := service.Resources.Storage[i].Attributes.Find(sdl.StorageAttributePersistent) @@ -257,15 +282,9 @@ func (c *client) Deploy(ctx context.Context, deployment ctypes.IDeployment) erro } if persistent { - if err = applyStatefulSet(ctx, c.kc, builder.BuildStatefulSet(workload)); err != nil { - c.log.Error("applying statefulSet", "err", err, "lease", lid, "service", service.Name) - return err - } + svc.statefulSet = builder.BuildStatefulSet(workload) } else { - if err = applyDeployment(ctx, c.kc, builder.NewDeployment(workload)); err != nil { - c.log.Error("applying deployment", "err", err, "lease", lid, "service", service.Name) - return err - } + svc.deployment = builder.NewDeployment(workload) } if len(service.Expose) == 0 { @@ -273,17 +292,60 @@ func (c *client) Deploy(ctx context.Context, deployment ctypes.IDeployment) erro continue } - serviceBuilderLocal := builder.BuildService(workload, false) - if serviceBuilderLocal.Any() { - if err = applyService(ctx, c.kc, serviceBuilderLocal); err != nil { + svc.localService = builder.BuildService(workload, false) + svc.globalService = builder.BuildService(workload, true) + + applies.services = append(applies.services, svc) + } + + if err := applyNS(ctx, c.kc, applies.ns); err != nil { + c.log.Error("applying namespace", "err", err, "lease", lid) + return 
err + } + + if err := applyNetPolicies(ctx, c.kc, applies.netPol); err != nil { // + c.log.Error("applying namespace network policies", "err", err, "lease", lid) + return err + } + + err = applyManifest(ctx, c.ac, applies.cmanifest) + if err != nil { + c.log.Error("applying manifest", "err", err, "lease", lid) + return err + } + + if err = cleanupStaleResources(ctx, c.kc, lid, group); err != nil { + c.log.Error("cleaning stale resources", "err", err, "lease", lid) + return err + } + + for svcIdx := range group.Services { + applyObjs := &applies.services[svcIdx] + service := &group.Services[svcIdx] + + if applyObjs.statefulSet != nil { + if err = applyStatefulSet(ctx, c.kc, applyObjs.statefulSet); err != nil { + c.log.Error("applying statefulSet", "err", err, "lease", lid, "service", service.Name) + return err + } + } + + if applyObjs.deployment != nil { + if err = applyDeployment(ctx, c.kc, applyObjs.deployment); err != nil { + c.log.Error("applying deployment", "err", err, "lease", lid, "service", service.Name) + return err + } + } + + if applyObjs.localService.Any() { + if err = applyService(ctx, c.kc, applyObjs.localService); err != nil { c.log.Error("applying local service", "err", err, "lease", lid, "service", service.Name) return err } } - serviceBuilderGlobal := builder.BuildService(workload, true) - if serviceBuilderGlobal.Any() { - if err = applyService(ctx, c.kc, serviceBuilderGlobal); err != nil { + if applyObjs.globalService.Any() { + if err = applyService(ctx, c.kc, applyObjs.globalService); err != nil { c.log.Error("applying global service", "err", err, "lease", lid, "service", service.Name) return err } @@ -298,6 +360,12 @@ func (c *client) TeardownLease(ctx context.Context, lid mtypes.LeaseID) error { return nil, c.kc.CoreV1().Namespaces().Delete(ctx, builder.LidNS(lid), metav1.DeleteOptions{}) }) + if result != nil { + c.log.Error("teardown lease: unable to delete namespace", "ns", builder.LidNS(lid), "error", result) + if 
kerrors.IsNotFound(result) { + result = nil + } + } _, err := wrapKubeCall("manifests-delete", func() (interface{}, error) { return nil, c.ac.AkashV2beta2().Manifests(c.ns).Delete(ctx, builder.LidNS(lid), metav1.DeleteOptions{}) }) @@ -417,7 +485,7 @@ func (c *client) LeaseLogs(ctx context.Context, lid mtypes.LeaseID, }) if err != nil { c.log.Error("listing pods", "err", err) - return nil, errors.Wrap(err, kubeclienterrors.ErrInternalError.Error()) + return nil, fmt.Errorf("%s: %w", kubeclienterrors.ErrInternalError.Error(), err) } streams := make([]*ctypes.ServiceLog, len(pods.Items)) for i, pod := range pods.Items { @@ -431,7 +499,7 @@ func (c *client) LeaseLogs(ctx context.Context, lid mtypes.LeaseID, if err != nil { c.log.Error("get pod logs", "err", err) - return nil, errors.Wrap(err, kubeclienterrors.ErrInternalError.Error()) + return nil, fmt.Errorf("%s: %w", kubeclienterrors.ErrInternalError.Error(), err) } streams[i] = cluster.NewServiceLog(pod.Name, stream) } @@ -453,7 +521,7 @@ func (c *client) ForwardedPortStatus(ctx context.Context, leaseID mtypes.LeaseID }) if err != nil { c.log.Error("list services", "err", err) - return nil, errors.Wrap(err, kubeclienterrors.ErrInternalError.Error()) + return nil, fmt.Errorf("%s: %w", kubeclienterrors.ErrInternalError.Error(), err) } forwardedPorts := make(map[string][]ctypes.ForwardedPortStatus) @@ -582,7 +650,7 @@ func (c *client) ServiceStatus(ctx context.Context, lid mtypes.LeaseID, name str if err != nil { c.log.Error("deployment get", "err", err) - return nil, errors.Wrap(err, kubeclienterrors.ErrInternalError.Error()) + return nil, fmt.Errorf("%s: %w", kubeclienterrors.ErrInternalError.Error(), err) } if deployment == nil { c.log.Error("no deployment found", "name", name) @@ -607,7 +675,7 @@ func (c *client) ServiceStatus(ctx context.Context, lid mtypes.LeaseID, name str if err != nil { c.log.Error("statefulsets get", "err", err) - return nil, errors.Wrap(err, kubeclienterrors.ErrInternalError.Error()) + 
return nil, fmt.Errorf("%s: %w", kubeclienterrors.ErrInternalError.Error(), err) } if statefulset == nil { c.log.Error("no statefulsets found", "name", name) @@ -647,7 +715,7 @@ exposeCheckLoop: Global: expose.Global, Hosts: expose.Hosts, } - if sdlutil.ShouldBeIngress(mse) { + if mse.IsIngress() { hasHostnames = true break exposeCheckLoop } @@ -673,7 +741,7 @@ exposeCheckLoop: if err != nil { c.log.Error("provider hosts get", "err", err) - return nil, errors.Wrap(err, kubeclienterrors.ErrInternalError.Error()) + return nil, fmt.Errorf("%s: %w", kubeclienterrors.ErrInternalError.Error(), err) } hosts := make([]string, 0, len(phs.Items)) @@ -698,7 +766,7 @@ func (c *client) leaseExists(ctx context.Context, lid mtypes.LeaseID) error { } c.log.Error("namespaces get", "err", err) - return errors.Wrap(err, kubeclienterrors.ErrInternalError.Error()) + return fmt.Errorf("%s: %w", kubeclienterrors.ErrInternalError.Error(), err) } return nil @@ -715,7 +783,7 @@ func (c *client) deploymentsForLease(ctx context.Context, lid mtypes.LeaseID) (m if err != nil { c.log.Error("deployments list", "err", err) - return nil, errors.Wrap(err, kubeclienterrors.ErrInternalError.Error()) + return nil, fmt.Errorf("%s: %w", kubeclienterrors.ErrInternalError.Error(), err) } statefulsets, err := wrapKubeCall("statefulsets-list", func() (*appsv1.StatefulSetList, error) { @@ -724,7 +792,7 @@ func (c *client) deploymentsForLease(ctx context.Context, lid mtypes.LeaseID) (m if err != nil { c.log.Error("statefulsets list", "err", err) - return nil, errors.Wrap(err, kubeclienterrors.ErrInternalError.Error()) + return nil, fmt.Errorf("%s: %w", kubeclienterrors.ErrInternalError.Error(), err) } serviceStatus := make(map[string]*ctypes.ServiceStatus) diff --git a/cluster/kube/client_test.go b/cluster/kube/client_test.go index bf089560..2e704ae8 100644 --- a/cluster/kube/client_test.go +++ b/cluster/kube/client_test.go @@ -538,7 +538,7 @@ func TestServiceStatusWithIngress(t *testing.T) { Command: nil, 
Args: nil, Env: nil, - Resources: types.ResourceUnits{}, + Resources: types.Resources{}, Count: 1, Expose: []manifest.ServiceExpose{ { @@ -557,7 +557,7 @@ func TestServiceStatusWithIngress(t *testing.T) { Command: nil, Args: nil, Env: nil, - Resources: types.ResourceUnits{}, + Resources: types.Resources{}, Count: 1, Expose: []manifest.ServiceExpose{ { @@ -623,7 +623,7 @@ func TestServiceStatusWithNoManifest(t *testing.T) { Command: nil, Args: nil, Env: nil, - Resources: types.ResourceUnits{}, + Resources: types.Resources{}, Count: 1, Expose: []manifest.ServiceExpose{ { @@ -642,7 +642,7 @@ func TestServiceStatusWithNoManifest(t *testing.T) { Command: nil, Args: nil, Env: nil, - Resources: types.ResourceUnits{}, + Resources: types.Resources{}, Count: 1, Expose: []manifest.ServiceExpose{ { @@ -697,7 +697,7 @@ func TestServiceStatusWithoutIngress(t *testing.T) { Command: nil, Args: nil, Env: nil, - Resources: types.ResourceUnits{}, + Resources: types.Resources{}, Count: 1, Expose: []manifest.ServiceExpose{ { @@ -716,7 +716,7 @@ func TestServiceStatusWithoutIngress(t *testing.T) { Command: nil, Args: nil, Env: nil, - Resources: types.ResourceUnits{}, + Resources: types.Resources{}, Count: 1, Expose: []manifest.ServiceExpose{ { diff --git a/cluster/kube/invendory_node.go b/cluster/kube/invendory_node.go index 80560e53..e7d0b43d 100644 --- a/cluster/kube/invendory_node.go +++ b/cluster/kube/invendory_node.go @@ -162,21 +162,6 @@ func sParamsEnsureResources(sparams *crd.SchedulerParams) { } } -// -// func sParamsEnsureAffinityNode(sparams *crd.SchedulerParams) { -// sParamsEnsureAffinity(sparams) -// -// if sparams.Affinity.Node == nil { -// sparams.Affinity.Node = &crd.NodeAffinity{} -// } -// } -// -// func sParamsApplySelectors(sparams *crd.SchedulerParams, selectors []corev1.NodeSelectorRequirement) { -// sParamsEnsureAffinityNode(sparams) -// -// sparams.Affinity.Node.Required = append(sparams.Affinity.Node.Required, selectors...) 
-// } - func (nd *node) tryAdjustMemory(res *types.Memory) bool { return nd.memory.subNLZ(res.Quantity) } diff --git a/cluster/kube/inventory.go b/cluster/kube/inventory.go index 59c7475b..d73e7a4d 100644 --- a/cluster/kube/inventory.go +++ b/cluster/kube/inventory.go @@ -2,17 +2,21 @@ package kube import ( "context" + "encoding/json" "fmt" "reflect" "strings" "time" + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" corev1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/client-go/tools/pager" + "github.com/tendermint/tendermint/libs/log" + types "github.com/akash-network/akash-api/go/node/types/v1beta3" "github.com/akash-network/provider/cluster/kube/builder" @@ -29,14 +33,16 @@ type clusterNodes map[string]*node type inventory struct { storageClasses clusterStorage nodes clusterNodes + log log.Logger } var _ ctypes.Inventory = (*inventory)(nil) -func newInventory(storage clusterStorage, nodes map[string]*node) *inventory { +func newInventory(log log.Logger, storage clusterStorage, nodes map[string]*node) *inventory { inv := &inventory{ storageClasses: storage, nodes: nodes, + log: log, } return inv @@ -46,6 +52,7 @@ func (inv *inventory) dup() inventory { dup := inventory{ storageClasses: inv.storageClasses.dup(), nodes: inv.nodes.dup(), + log: inv.log, } return dup @@ -54,7 +61,7 @@ func (inv *inventory) dup() inventory { // tryAdjust cluster inventory // It returns two boolean values. 
First indicates if node-wide resources satisfy (true) requirements // Seconds indicates if cluster-wide resources satisfy (true) requirements -func (inv *inventory) tryAdjust(node string, res *types.ResourceUnits) (*crd.SchedulerParams, bool, bool) { +func (inv *inventory) tryAdjust(node string, res *types.Resources) (*crd.SchedulerParams, bool, bool) { nd := inv.nodes[node].dup() sparams := &crd.SchedulerParams{} @@ -119,23 +126,46 @@ func (inv *inventory) Adjust(reservation ctypes.ReservationGroup, opts ...ctypes cfg = opt(cfg) } - resources := make([]types.Resources, len(reservation.Resources().GetResources())) - adjustedResources := make([]types.Resources, 0, len(reservation.Resources().GetResources())) - copy(resources, reservation.Resources().GetResources()) + origResources := reservation.Resources().GetResourceUnits() + resources := make(dtypes.ResourceUnits, 0, len(origResources)) + adjustedResources := make(dtypes.ResourceUnits, 0, len(origResources)) + + for _, res := range origResources { + resources = append(resources, dtypes.ResourceUnit{ + Resources: res.Resources.Dup(), + Count: res.Count, + }) + + adjustedResources = append(adjustedResources, dtypes.ResourceUnit{ + Resources: res.Resources.Dup(), + Count: res.Count, + }) + } cparams := crd.ClusterSettings{ - SchedulerParams: make([]*crd.SchedulerParams, len(reservation.Resources().GetResources())), + SchedulerParams: make([]*crd.SchedulerParams, len(reservation.Resources().GetResourceUnits())), } currInventory := inv.dup() + var err error + nodes: for nodeName := range currInventory.nodes { for i := len(resources) - 1; i >= 0; i-- { - adjusted := resources[i] + adjustedGroup := false + + var adjusted *types.Resources + if origResources[i].Count == resources[i].Count { + adjusted = &adjustedResources[i].Resources + } else { + adjustedGroup = true + res := adjustedResources[i].Resources.Dup() + adjusted = &res + } for ; resources[i].Count > 0; resources[i].Count-- { - sparams, nStatus, cStatus := 
currInventory.tryAdjust(nodeName, &adjusted.Resources) + sparams, nStatus, cStatus := currInventory.tryAdjust(nodeName, adjusted) if !cStatus { // cannot satisfy cluster-wide resources, stop lookup break nodes @@ -146,14 +176,36 @@ nodes: continue nodes } - if sparams != nil { - if cparams.SchedulerParams[i] == nil { - cparams.SchedulerParams[i] = sparams - } else if !reflect.DeepEqual(sparams, cparams.SchedulerParams[i]) { - // all replicas of the same service are expected to have same node selectors and runtimes - // if they don't match then provider cannot bid + // at this point we expect all replicas of the same service to produce + // same adjusted resource units as well as cluster params + if adjustedGroup { + if !reflect.DeepEqual(adjusted, &adjustedResources[i].Resources) { + jFirstAdjusted, _ := json.Marshal(&adjustedResources[i].Resources) + jCurrAdjusted, _ := json.Marshal(adjusted) + + inv.log.Error(fmt.Sprintf("resource mismatch between replicas within group:\n"+ + "\tfirst adjusted replica: %s\n"+ + "\tcurr adjusted replica: %s", string(jFirstAdjusted), string(jCurrAdjusted))) + + err = ctypes.ErrGroupResourceMismatch break nodes } + + // all replicas of the same service are expected to have same node selectors and runtimes + // if they don't match then provider cannot bid + if !reflect.DeepEqual(sparams, cparams.SchedulerParams[i]) { + jFirstSparams, _ := json.Marshal(cparams.SchedulerParams[i]) + jCurrSparams, _ := json.Marshal(sparams) + + inv.log.Error(fmt.Sprintf("scheduler params mismatch between replicas within group:\n"+ + "\tfirst replica: %s\n"+ + "\tcurr replica: %s", string(jFirstSparams), string(jCurrSparams))) + + err = ctypes.ErrGroupResourceMismatch + break nodes + } + } else { + cparams.SchedulerParams[i] = sparams } } @@ -161,7 +213,7 @@ nodes: // remove group from the list to prevent double request of the same resources if resources[i].Count == 0 { resources = append(resources[:i], resources[i+1:]...) 
- adjustedResources = append(adjustedResources, adjusted) + goto nodes } } } @@ -177,6 +229,10 @@ nodes: return nil } + if err != nil { + return err + } + return ctypes.ErrInsufficientCapacity } @@ -272,7 +328,7 @@ func (c *client) Inventory(ctx context.Context) (ctypes.Inventory, error) { return nil, err } - return newInventory(cstorage, knodes), nil + return newInventory(c.log.With("kube", "inventory"), cstorage, knodes), nil } func (c *client) fetchStorage(ctx context.Context) (clusterStorage, error) { diff --git a/cluster/kube/inventory_test.go b/cluster/kube/inventory_test.go index 4a09e433..84ccd316 100644 --- a/cluster/kube/inventory_test.go +++ b/cluster/kube/inventory_test.go @@ -18,6 +18,7 @@ import ( atypes "github.com/akash-network/akash-api/go/node/types/v1beta3" "github.com/akash-network/node/testutil" + "github.com/akash-network/provider/cluster/kube/builder" ctypes "github.com/akash-network/provider/cluster/types/v1beta3" crd "github.com/akash-network/provider/pkg/apis/akash.network/v2beta2" akashclientfake "github.com/akash-network/provider/pkg/client/clientset/versioned/fake" @@ -28,7 +29,7 @@ import ( type testReservation struct { resources dtypes.GroupSpec - adjustedResources []atypes.Resources + adjustedResources dtypes.ResourceUnits } var _ ctypes.Reservation = (*testReservation)(nil) @@ -37,11 +38,11 @@ func (r *testReservation) OrderID() mtypes.OrderID { return mtypes.OrderID{} } -func (r *testReservation) Resources() atypes.ResourceGroup { +func (r *testReservation) Resources() dtypes.ResourceGroup { return r.resources } -func (r *testReservation) SetAllocatedResources(val []atypes.Resources) { +func (r *testReservation) SetAllocatedResources(val dtypes.ResourceUnits) { r.adjustedResources = val } @@ -359,7 +360,7 @@ func TestInventoryMultipleReplicasFulFilled1(t *testing.T) { require.NotNil(t, inv) require.Len(t, inv.Metrics().Nodes, 4) - err = inv.Adjust(multipleReplicasGenReservations(100000, 2)) + err = 
inv.Adjust(multipleReplicasGenReservations(100000, 0, 2)) require.NoError(t, err) } @@ -381,7 +382,7 @@ func TestInventoryMultipleReplicasFulFilled2(t *testing.T) { require.NotNil(t, inv) require.Len(t, inv.Metrics().Nodes, 4) - err = inv.Adjust(multipleReplicasGenReservations(68780, 4)) + err = inv.Adjust(multipleReplicasGenReservations(68780, 0, 4)) require.NoError(t, err) } @@ -403,7 +404,7 @@ func TestInventoryMultipleReplicasFulFilled3(t *testing.T) { require.NotNil(t, inv) require.Len(t, inv.Metrics().Nodes, 4) - err = inv.Adjust(multipleReplicasGenReservations(68800, 3)) + err = inv.Adjust(multipleReplicasGenReservations(68800, 0, 3)) require.NoError(t, err) } @@ -425,7 +426,7 @@ func TestInventoryMultipleReplicasFulFilled4(t *testing.T) { require.NotNil(t, inv) require.Len(t, inv.Metrics().Nodes, 4) - err = inv.Adjust(multipleReplicasGenReservations(119495, 2)) + err = inv.Adjust(multipleReplicasGenReservations(119495, 0, 2)) require.NoError(t, err) } @@ -447,7 +448,51 @@ func TestInventoryMultipleReplicasFulFilled5(t *testing.T) { require.NotNil(t, inv) require.Len(t, inv.Metrics().Nodes, 4) - err = inv.Adjust(multipleReplicasGenReservations(68780, 1)) + err = inv.Adjust(multipleReplicasGenReservations(68780, 0, 1)) + require.NoError(t, err) +} + +func TestInventoryMultipleReplicasFulFilled6(t *testing.T) { + s := makeInventoryScaffold() + + nodeList := &v1.NodeList{ + Items: multipleReplicasGenNodes(), + } + + podList := &v1.PodList{Items: []v1.Pod{}} + + s.nodeInterfaceMock.On("List", mock.Anything, mock.Anything).Return(nodeList, nil) + s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil) + + clientInterface := clientForTest(t, s.kmock, s.amock) + inv, err := clientInterface.Inventory(context.Background()) + require.NoError(t, err) + require.NotNil(t, inv) + require.Len(t, inv.Metrics().Nodes, 4) + + err = inv.Adjust(multipleReplicasGenReservations(68780, 1, 1)) + require.NoError(t, err) +} + +func 
TestInventoryMultipleReplicasFulFilled7(t *testing.T) { + s := makeInventoryScaffold() + + nodeList := &v1.NodeList{ + Items: multipleReplicasGenNodes(), + } + + podList := &v1.PodList{Items: []v1.Pod{}} + + s.nodeInterfaceMock.On("List", mock.Anything, mock.Anything).Return(nodeList, nil) + s.podInterfaceMock.On("List", mock.Anything, mock.Anything).Return(podList, nil) + + clientInterface := clientForTest(t, s.kmock, s.amock) + inv, err := clientInterface.Inventory(context.Background()) + require.NoError(t, err) + require.NotNil(t, inv) + require.Len(t, inv.Metrics().Nodes, 4) + + err = inv.Adjust(multipleSvcReplicasGenReservations(68700, 1, 1)) require.NoError(t, err) } @@ -469,7 +514,7 @@ func TestInventoryMultipleReplicasOutOfCapacity1(t *testing.T) { require.NotNil(t, inv) require.Len(t, inv.Metrics().Nodes, 4) - err = inv.Adjust(multipleReplicasGenReservations(70000, 4)) + err = inv.Adjust(multipleReplicasGenReservations(70000, 0, 4)) require.Error(t, err) require.EqualError(t, ctypes.ErrInsufficientCapacity, err.Error()) } @@ -492,7 +537,7 @@ func TestInventoryMultipleReplicasOutOfCapacity2(t *testing.T) { require.NotNil(t, inv) require.Len(t, inv.Metrics().Nodes, 4) - err = inv.Adjust(multipleReplicasGenReservations(100000, 3)) + err = inv.Adjust(multipleReplicasGenReservations(100000, 0, 3)) require.Error(t, err) require.EqualError(t, ctypes.ErrInsufficientCapacity, err.Error()) } @@ -515,7 +560,7 @@ func TestInventoryMultipleReplicasOutOfCapacity4(t *testing.T) { require.NotNil(t, inv) require.Len(t, inv.Metrics().Nodes, 4) - err = inv.Adjust(multipleReplicasGenReservations(119525, 2)) + err = inv.Adjust(multipleReplicasGenReservations(119525, 0, 2)) require.Error(t, err) require.EqualError(t, ctypes.ErrInsufficientCapacity, err.Error()) } @@ -585,11 +630,15 @@ func multipleReplicasGenNodes() []v1.Node { TypeMeta: metav1.TypeMeta{}, ObjectMeta: metav1.ObjectMeta{ Name: "node2", + Labels: map[string]string{ + 
"akash.network/capabilities.gpu.vendor.nvidia.model.a100": "true", + }, }, Spec: v1.NodeSpec{}, Status: v1.NodeStatus{ Allocatable: v1.ResourceList{ v1.ResourceCPU: *(resource.NewMilliQuantity(68800, resource.DecimalSI)), + builder.ResourceGPUNvidia: *(resource.NewQuantity(2, resource.DecimalSI)), v1.ResourceMemory: *(resource.NewQuantity(457328218112, resource.DecimalSI)), v1.ResourceEphemeralStorage: *(resource.NewQuantity(7752161163113, resource.DecimalSI)), }, @@ -632,14 +681,62 @@ func multipleReplicasGenNodes() []v1.Node { } } -func multipleReplicasGenReservations(cpuUnits uint64, count uint32) *testReservation { +func multipleReplicasGenReservations(cpuUnits, gpuUnits uint64, count uint32) *testReservation { + var gpuAttributes atypes.Attributes + if gpuUnits > 0 { + gpuAttributes = append(gpuAttributes, atypes.Attribute{ + Key: "vendor/nvidia/model/a100", + Value: "true", + }) + } + return &testReservation{ + resources: dtypes.GroupSpec{ + Name: "bla", + Requirements: atypes.PlacementRequirements{}, + Resources: dtypes.ResourceUnits{ + { + Resources: atypes.Resources{ + ID: 1, + CPU: &atypes.CPU{ + Units: atypes.NewResourceValue(cpuUnits), + }, + GPU: &atypes.GPU{ + Units: atypes.NewResourceValue(gpuUnits), + Attributes: gpuAttributes, + }, + Memory: &atypes.Memory{ + Quantity: atypes.NewResourceValue(16 * unit.Gi), + }, + Storage: []atypes.Storage{ + { + Name: "default", + Quantity: atypes.NewResourceValue(8 * unit.Gi), + }, + }, + }, + Count: count, + }, + }, + }, + } +} + +func multipleSvcReplicasGenReservations(cpuUnits, gpuUnits uint64, count uint32) *testReservation { + var gpuAttributes atypes.Attributes + if gpuUnits > 0 { + gpuAttributes = append(gpuAttributes, atypes.Attribute{ + Key: "vendor/nvidia/model/a100", + Value: "true", + }) + } return &testReservation{ resources: dtypes.GroupSpec{ Name: "bla", Requirements: atypes.PlacementRequirements{}, - Resources: []dtypes.Resource{ + Resources: dtypes.ResourceUnits{ { - Resources: 
atypes.ResourceUnits{ + Resources: atypes.Resources{ + ID: 1, CPU: &atypes.CPU{ Units: atypes.NewResourceValue(cpuUnits), }, @@ -658,6 +755,28 @@ func multipleReplicasGenReservations(cpuUnits uint64, count uint32) *testReserva }, Count: count, }, + { + Resources: atypes.Resources{ + ID: 2, + CPU: &atypes.CPU{ + Units: atypes.NewResourceValue(cpuUnits), + }, + GPU: &atypes.GPU{ + Units: atypes.NewResourceValue(gpuUnits), + Attributes: gpuAttributes, + }, + Memory: &atypes.Memory{ + Quantity: atypes.NewResourceValue(16 * unit.Gi), + }, + Storage: []atypes.Storage{ + { + Name: "default", + Quantity: atypes.NewResourceValue(8 * unit.Gi), + }, + }, + }, + Count: count, + }, }, }, } diff --git a/cluster/manager.go b/cluster/manager.go index 4162925f..2f43e26d 100644 --- a/cluster/manager.go +++ b/cluster/manager.go @@ -17,7 +17,6 @@ import ( mani "github.com/akash-network/akash-api/go/manifest/v2beta2" mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3" "github.com/akash-network/node/pubsub" - sdlutil "github.com/akash-network/node/sdl/util" kubeclienterrors "github.com/akash-network/provider/cluster/kube/errors" ctypes "github.com/akash-network/provider/cluster/types/v1beta3" @@ -402,7 +401,7 @@ func (dm *deploymentManager) doDeploy(ctx context.Context) ([]string, []string, // Iterate over each entry, extracting the ingress services & leased IPs for _, service := range dm.deployment.ManifestGroup().Services { for _, expose := range service.Expose { - if sdlutil.ShouldBeIngress(expose) { + if expose.IsIngress() { if dm.config.DeploymentIngressStaticHosts { uid := manifest.IngressHost(dm.deployment.LeaseID(), service.Name) host := fmt.Sprintf("%s.%s", uid, dm.config.DeploymentIngressDomain) @@ -442,7 +441,7 @@ func (dm *deploymentManager) doDeploy(ctx context.Context) ([]string, []string, } for host, serviceExpose := range hosts { - externalPort := uint32(sdlutil.ExposeExternalPort(serviceExpose)) + externalPort := uint32(serviceExpose.GetExternalPort()) 
err = dm.client.DeclareHostname(ctx, dm.deployment.LeaseID(), host, hostToServiceName[host], externalPort) if err != nil { // TODO - counter @@ -455,10 +454,10 @@ func (dm *deploymentManager) doDeploy(ctx context.Context) ([]string, []string, endpointName := serviceExpose.expose.IP sharingKey := clusterutil.MakeIPSharingKey(dm.deployment.LeaseID(), endpointName) - externalPort := sdlutil.ExposeExternalPort(serviceExpose.expose) + externalPort := serviceExpose.expose.GetExternalPort() port := serviceExpose.expose.Port - err = dm.client.DeclareIP(ctx, dm.deployment.LeaseID(), serviceExpose.name, uint32(port), uint32(externalPort), serviceExpose.expose.Proto, sharingKey, false) + err = dm.client.DeclareIP(ctx, dm.deployment.LeaseID(), serviceExpose.name, port, uint32(externalPort), serviceExpose.expose.Proto, sharingKey, false) if err != nil { if !errors.Is(err, kubeclienterrors.ErrAlreadyExists) { dm.log.Error("failed adding IP declaration", "service", serviceExpose.name, "port", externalPort, "endpoint", serviceExpose.expose.IP, "err", err) diff --git a/cluster/mocks/client.go b/cluster/mocks/client.go index 8f097c89..d0309d8a 100644 --- a/cluster/mocks/client.go +++ b/cluster/mocks/client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -1316,13 +1316,12 @@ func (_c *Client_TeardownLease_Call) RunAndReturn(run func(context.Context, mark return _c } -type mockConstructorTestingTNewClient interface { +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewClient(t mockConstructorTestingTNewClient) *Client { +}) *Client { mock := &Client{} mock.Mock.Test(t) diff --git a/cluster/mocks/cluster.go b/cluster/mocks/cluster.go index b8d30620..48aaf2f2 100644 --- a/cluster/mocks/cluster.go +++ b/cluster/mocks/cluster.go @@ -1,12 +1,12 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks import ( - clustertypesv1beta3 "github.com/akash-network/provider/cluster/types/v1beta3" + deploymentv1beta3 "github.com/akash-network/akash-api/go/node/deployment/v1beta3" mock "github.com/stretchr/testify/mock" - typesv1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" + typesv1beta3 "github.com/akash-network/provider/cluster/types/v1beta3" v1beta3 "github.com/akash-network/akash-api/go/node/market/v1beta3" ) @@ -25,23 +25,23 @@ func (_m *Cluster) EXPECT() *Cluster_Expecter { } // Reserve provides a mock function with given fields: _a0, _a1 -func (_m *Cluster) Reserve(_a0 v1beta3.OrderID, _a1 typesv1beta3.ResourceGroup) (clustertypesv1beta3.Reservation, error) { +func (_m *Cluster) Reserve(_a0 v1beta3.OrderID, _a1 deploymentv1beta3.ResourceGroup) (typesv1beta3.Reservation, error) { ret := _m.Called(_a0, _a1) - var r0 clustertypesv1beta3.Reservation + var r0 typesv1beta3.Reservation var r1 error - if rf, ok := ret.Get(0).(func(v1beta3.OrderID, typesv1beta3.ResourceGroup) (clustertypesv1beta3.Reservation, error)); ok { + if rf, ok := ret.Get(0).(func(v1beta3.OrderID, deploymentv1beta3.ResourceGroup) (typesv1beta3.Reservation, error)); ok { return rf(_a0, _a1) } - if rf, ok := ret.Get(0).(func(v1beta3.OrderID, typesv1beta3.ResourceGroup) clustertypesv1beta3.Reservation); ok { + if rf, ok := ret.Get(0).(func(v1beta3.OrderID, deploymentv1beta3.ResourceGroup) typesv1beta3.Reservation); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(clustertypesv1beta3.Reservation) + r0 = ret.Get(0).(typesv1beta3.Reservation) } } - if rf, ok := 
ret.Get(1).(func(v1beta3.OrderID, typesv1beta3.ResourceGroup) error); ok { + if rf, ok := ret.Get(1).(func(v1beta3.OrderID, deploymentv1beta3.ResourceGroup) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -57,24 +57,24 @@ type Cluster_Reserve_Call struct { // Reserve is a helper method to define mock.On call // - _a0 v1beta3.OrderID -// - _a1 typesv1beta3.ResourceGroup +// - _a1 deploymentv1beta3.ResourceGroup func (_e *Cluster_Expecter) Reserve(_a0 interface{}, _a1 interface{}) *Cluster_Reserve_Call { return &Cluster_Reserve_Call{Call: _e.mock.On("Reserve", _a0, _a1)} } -func (_c *Cluster_Reserve_Call) Run(run func(_a0 v1beta3.OrderID, _a1 typesv1beta3.ResourceGroup)) *Cluster_Reserve_Call { +func (_c *Cluster_Reserve_Call) Run(run func(_a0 v1beta3.OrderID, _a1 deploymentv1beta3.ResourceGroup)) *Cluster_Reserve_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(v1beta3.OrderID), args[1].(typesv1beta3.ResourceGroup)) + run(args[0].(v1beta3.OrderID), args[1].(deploymentv1beta3.ResourceGroup)) }) return _c } -func (_c *Cluster_Reserve_Call) Return(_a0 clustertypesv1beta3.Reservation, _a1 error) *Cluster_Reserve_Call { +func (_c *Cluster_Reserve_Call) Return(_a0 typesv1beta3.Reservation, _a1 error) *Cluster_Reserve_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *Cluster_Reserve_Call) RunAndReturn(run func(v1beta3.OrderID, typesv1beta3.ResourceGroup) (clustertypesv1beta3.Reservation, error)) *Cluster_Reserve_Call { +func (_c *Cluster_Reserve_Call) RunAndReturn(run func(v1beta3.OrderID, deploymentv1beta3.ResourceGroup) (typesv1beta3.Reservation, error)) *Cluster_Reserve_Call { _c.Call.Return(run) return _c } @@ -121,13 +121,12 @@ func (_c *Cluster_Unreserve_Call) RunAndReturn(run func(v1beta3.OrderID) error) return _c } -type mockConstructorTestingTNewCluster interface { +// NewCluster creates a new instance of Cluster. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
+// The first argument is typically a *testing.T value. +func NewCluster(t interface { mock.TestingT Cleanup(func()) -} - -// NewCluster creates a new instance of Cluster. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewCluster(t mockConstructorTestingTNewCluster) *Cluster { +}) *Cluster { mock := &Cluster{} mock.Mock.Test(t) diff --git a/cluster/mocks/hostname_service_client.go b/cluster/mocks/hostname_service_client.go index 9fa6dbcd..4af2fcd1 100644 --- a/cluster/mocks/hostname_service_client.go +++ b/cluster/mocks/hostname_service_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -209,13 +209,12 @@ func (_c *HostnameServiceClient_ReserveHostnames_Call) RunAndReturn(run func(con return _c } -type mockConstructorTestingTNewHostnameServiceClient interface { +// NewHostnameServiceClient creates a new instance of HostnameServiceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewHostnameServiceClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewHostnameServiceClient creates a new instance of HostnameServiceClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewHostnameServiceClient(t mockConstructorTestingTNewHostnameServiceClient) *HostnameServiceClient { +}) *HostnameServiceClient { mock := &HostnameServiceClient{} mock.Mock.Test(t) diff --git a/cluster/mocks/i_deployment.go b/cluster/mocks/i_deployment.go index b232b59e..3910d8da 100644 --- a/cluster/mocks/i_deployment.go +++ b/cluster/mocks/i_deployment.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. 
package mocks @@ -149,13 +149,12 @@ func (_c *IDeployment_ManifestGroup_Call) RunAndReturn(run func() *v2beta2.Group return _c } -type mockConstructorTestingTNewIDeployment interface { +// NewIDeployment creates a new instance of IDeployment. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIDeployment(t interface { mock.TestingT Cleanup(func()) -} - -// NewIDeployment creates a new instance of IDeployment. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewIDeployment(t mockConstructorTestingTNewIDeployment) *IDeployment { +}) *IDeployment { mock := &IDeployment{} mock.Mock.Test(t) diff --git a/cluster/mocks/ip_operator_client.go b/cluster/mocks/ip_operator_client.go index 92514d15..d9fc1493 100644 --- a/cluster/mocks/ip_operator_client.go +++ b/cluster/mocks/ip_operator_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -247,13 +247,12 @@ func (_c *IPOperatorClient_String_Call) RunAndReturn(run func() string) *IPOpera return _c } -type mockConstructorTestingTNewIPOperatorClient interface { +// NewIPOperatorClient creates a new instance of IPOperatorClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewIPOperatorClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewIPOperatorClient creates a new instance of IPOperatorClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewIPOperatorClient(t mockConstructorTestingTNewIPOperatorClient) *IPOperatorClient { +}) *IPOperatorClient { mock := &IPOperatorClient{} mock.Mock.Test(t) diff --git a/cluster/mocks/metallb_client.go b/cluster/mocks/metallb_client.go index 6e13211c..90681167 100644 --- a/cluster/mocks/metallb_client.go +++ b/cluster/mocks/metallb_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -365,13 +365,12 @@ func (_c *MetalLBClient_Stop_Call) RunAndReturn(run func()) *MetalLBClient_Stop_ return _c } -type mockConstructorTestingTNewMetalLBClient interface { +// NewMetalLBClient creates a new instance of MetalLBClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMetalLBClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewMetalLBClient creates a new instance of MetalLBClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewMetalLBClient(t mockConstructorTestingTNewMetalLBClient) *MetalLBClient { +}) *MetalLBClient { mock := &MetalLBClient{} mock.Mock.Test(t) diff --git a/cluster/mocks/read_client.go b/cluster/mocks/read_client.go index 9cbc62b8..ca3fa349 100644 --- a/cluster/mocks/read_client.go +++ b/cluster/mocks/read_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -638,13 +638,12 @@ func (_c *ReadClient_ServiceStatus_Call) RunAndReturn(run func(context.Context, return _c } -type mockConstructorTestingTNewReadClient interface { +// NewReadClient creates a new instance of ReadClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewReadClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewReadClient creates a new instance of ReadClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewReadClient(t mockConstructorTestingTNewReadClient) *ReadClient { +}) *ReadClient { mock := &ReadClient{} mock.Mock.Test(t) diff --git a/cluster/mocks/reservation.go b/cluster/mocks/reservation.go index fc2a5564..be534091 100644 --- a/cluster/mocks/reservation.go +++ b/cluster/mocks/reservation.go @@ -1,12 +1,12 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks import ( + deploymentv1beta3 "github.com/akash-network/akash-api/go/node/deployment/v1beta3" marketv1beta3 "github.com/akash-network/akash-api/go/node/market/v1beta3" - mock "github.com/stretchr/testify/mock" - typesv1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" + mock "github.com/stretchr/testify/mock" ) // Reservation is an autogenerated mock type for the Reservation type @@ -148,15 +148,15 @@ func (_c *Reservation_OrderID_Call) RunAndReturn(run func() marketv1beta3.OrderI } // Resources provides a mock function with given fields: -func (_m *Reservation) Resources() typesv1beta3.ResourceGroup { +func (_m *Reservation) Resources() deploymentv1beta3.ResourceGroup { ret := _m.Called() - var r0 typesv1beta3.ResourceGroup - if rf, ok := ret.Get(0).(func() typesv1beta3.ResourceGroup); ok { + var r0 deploymentv1beta3.ResourceGroup + if rf, ok := ret.Get(0).(func() deploymentv1beta3.ResourceGroup); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(typesv1beta3.ResourceGroup) + r0 = ret.Get(0).(deploymentv1beta3.ResourceGroup) } } @@ -180,18 +180,18 @@ func (_c *Reservation_Resources_Call) Run(run func()) *Reservation_Resources_Cal return _c } -func (_c *Reservation_Resources_Call) Return(_a0 typesv1beta3.ResourceGroup) *Reservation_Resources_Call { +func (_c 
*Reservation_Resources_Call) Return(_a0 deploymentv1beta3.ResourceGroup) *Reservation_Resources_Call { _c.Call.Return(_a0) return _c } -func (_c *Reservation_Resources_Call) RunAndReturn(run func() typesv1beta3.ResourceGroup) *Reservation_Resources_Call { +func (_c *Reservation_Resources_Call) RunAndReturn(run func() deploymentv1beta3.ResourceGroup) *Reservation_Resources_Call { _c.Call.Return(run) return _c } // SetAllocatedResources provides a mock function with given fields: _a0 -func (_m *Reservation) SetAllocatedResources(_a0 []typesv1beta3.Resources) { +func (_m *Reservation) SetAllocatedResources(_a0 deploymentv1beta3.ResourceUnits) { _m.Called(_a0) } @@ -201,14 +201,14 @@ type Reservation_SetAllocatedResources_Call struct { } // SetAllocatedResources is a helper method to define mock.On call -// - _a0 []typesv1beta3.Resources +// - _a0 deploymentv1beta3.ResourceUnits func (_e *Reservation_Expecter) SetAllocatedResources(_a0 interface{}) *Reservation_SetAllocatedResources_Call { return &Reservation_SetAllocatedResources_Call{Call: _e.mock.On("SetAllocatedResources", _a0)} } -func (_c *Reservation_SetAllocatedResources_Call) Run(run func(_a0 []typesv1beta3.Resources)) *Reservation_SetAllocatedResources_Call { +func (_c *Reservation_SetAllocatedResources_Call) Run(run func(_a0 deploymentv1beta3.ResourceUnits)) *Reservation_SetAllocatedResources_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].([]typesv1beta3.Resources)) + run(args[0].(deploymentv1beta3.ResourceUnits)) }) return _c } @@ -218,7 +218,7 @@ func (_c *Reservation_SetAllocatedResources_Call) Return() *Reservation_SetAlloc return _c } -func (_c *Reservation_SetAllocatedResources_Call) RunAndReturn(run func([]typesv1beta3.Resources)) *Reservation_SetAllocatedResources_Call { +func (_c *Reservation_SetAllocatedResources_Call) RunAndReturn(run func(deploymentv1beta3.ResourceUnits)) *Reservation_SetAllocatedResources_Call { _c.Call.Return(run) return _c } @@ -256,13 +256,12 @@ func (_c 
*Reservation_SetClusterParams_Call) RunAndReturn(run func(interface{})) return _c } -type mockConstructorTestingTNewReservation interface { +// NewReservation creates a new instance of Reservation. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReservation(t interface { mock.TestingT Cleanup(func()) -} - -// NewReservation creates a new instance of Reservation. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewReservation(t mockConstructorTestingTNewReservation) *Reservation { +}) *Reservation { mock := &Reservation{} mock.Mock.Test(t) diff --git a/cluster/mocks/reservation_group.go b/cluster/mocks/reservation_group.go index c5691f4a..0df96d31 100644 --- a/cluster/mocks/reservation_group.go +++ b/cluster/mocks/reservation_group.go @@ -1,9 +1,9 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. 
package mocks import ( - typesv1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" + deploymentv1beta3 "github.com/akash-network/akash-api/go/node/deployment/v1beta3" mock "github.com/stretchr/testify/mock" ) @@ -64,15 +64,15 @@ func (_c *ReservationGroup_ClusterParams_Call) RunAndReturn(run func() interface } // Resources provides a mock function with given fields: -func (_m *ReservationGroup) Resources() typesv1beta3.ResourceGroup { +func (_m *ReservationGroup) Resources() deploymentv1beta3.ResourceGroup { ret := _m.Called() - var r0 typesv1beta3.ResourceGroup - if rf, ok := ret.Get(0).(func() typesv1beta3.ResourceGroup); ok { + var r0 deploymentv1beta3.ResourceGroup + if rf, ok := ret.Get(0).(func() deploymentv1beta3.ResourceGroup); ok { r0 = rf() } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(typesv1beta3.ResourceGroup) + r0 = ret.Get(0).(deploymentv1beta3.ResourceGroup) } } @@ -96,18 +96,18 @@ func (_c *ReservationGroup_Resources_Call) Run(run func()) *ReservationGroup_Res return _c } -func (_c *ReservationGroup_Resources_Call) Return(_a0 typesv1beta3.ResourceGroup) *ReservationGroup_Resources_Call { +func (_c *ReservationGroup_Resources_Call) Return(_a0 deploymentv1beta3.ResourceGroup) *ReservationGroup_Resources_Call { _c.Call.Return(_a0) return _c } -func (_c *ReservationGroup_Resources_Call) RunAndReturn(run func() typesv1beta3.ResourceGroup) *ReservationGroup_Resources_Call { +func (_c *ReservationGroup_Resources_Call) RunAndReturn(run func() deploymentv1beta3.ResourceGroup) *ReservationGroup_Resources_Call { _c.Call.Return(run) return _c } // SetAllocatedResources provides a mock function with given fields: _a0 -func (_m *ReservationGroup) SetAllocatedResources(_a0 []typesv1beta3.Resources) { +func (_m *ReservationGroup) SetAllocatedResources(_a0 deploymentv1beta3.ResourceUnits) { _m.Called(_a0) } @@ -117,14 +117,14 @@ type ReservationGroup_SetAllocatedResources_Call struct { } // SetAllocatedResources is a helper method to define 
mock.On call -// - _a0 []typesv1beta3.Resources +// - _a0 deploymentv1beta3.ResourceUnits func (_e *ReservationGroup_Expecter) SetAllocatedResources(_a0 interface{}) *ReservationGroup_SetAllocatedResources_Call { return &ReservationGroup_SetAllocatedResources_Call{Call: _e.mock.On("SetAllocatedResources", _a0)} } -func (_c *ReservationGroup_SetAllocatedResources_Call) Run(run func(_a0 []typesv1beta3.Resources)) *ReservationGroup_SetAllocatedResources_Call { +func (_c *ReservationGroup_SetAllocatedResources_Call) Run(run func(_a0 deploymentv1beta3.ResourceUnits)) *ReservationGroup_SetAllocatedResources_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].([]typesv1beta3.Resources)) + run(args[0].(deploymentv1beta3.ResourceUnits)) }) return _c } @@ -134,7 +134,7 @@ func (_c *ReservationGroup_SetAllocatedResources_Call) Return() *ReservationGrou return _c } -func (_c *ReservationGroup_SetAllocatedResources_Call) RunAndReturn(run func([]typesv1beta3.Resources)) *ReservationGroup_SetAllocatedResources_Call { +func (_c *ReservationGroup_SetAllocatedResources_Call) RunAndReturn(run func(deploymentv1beta3.ResourceUnits)) *ReservationGroup_SetAllocatedResources_Call { _c.Call.Return(run) return _c } @@ -172,13 +172,12 @@ func (_c *ReservationGroup_SetClusterParams_Call) RunAndReturn(run func(interfac return _c } -type mockConstructorTestingTNewReservationGroup interface { +// NewReservationGroup creates a new instance of ReservationGroup. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewReservationGroup(t interface { mock.TestingT Cleanup(func()) -} - -// NewReservationGroup creates a new instance of ReservationGroup. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. 
-func NewReservationGroup(t mockConstructorTestingTNewReservationGroup) *ReservationGroup { +}) *ReservationGroup { mock := &ReservationGroup{} mock.Mock.Test(t) diff --git a/cluster/mocks/service.go b/cluster/mocks/service.go index 2098c8e7..348a8d2e 100644 --- a/cluster/mocks/service.go +++ b/cluster/mocks/service.go @@ -1,11 +1,11 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks import ( context "context" - nodetypesv1beta3 "github.com/akash-network/akash-api/go/node/types/v1beta3" + deploymentv1beta3 "github.com/akash-network/akash-api/go/node/deployment/v1beta3" mock "github.com/stretchr/testify/mock" types "github.com/cosmos/cosmos-sdk/types" @@ -270,15 +270,15 @@ func (_c *Service_Ready_Call) RunAndReturn(run func() <-chan struct{}) *Service_ } // Reserve provides a mock function with given fields: _a0, _a1 -func (_m *Service) Reserve(_a0 v1beta3.OrderID, _a1 nodetypesv1beta3.ResourceGroup) (typesv1beta3.Reservation, error) { +func (_m *Service) Reserve(_a0 v1beta3.OrderID, _a1 deploymentv1beta3.ResourceGroup) (typesv1beta3.Reservation, error) { ret := _m.Called(_a0, _a1) var r0 typesv1beta3.Reservation var r1 error - if rf, ok := ret.Get(0).(func(v1beta3.OrderID, nodetypesv1beta3.ResourceGroup) (typesv1beta3.Reservation, error)); ok { + if rf, ok := ret.Get(0).(func(v1beta3.OrderID, deploymentv1beta3.ResourceGroup) (typesv1beta3.Reservation, error)); ok { return rf(_a0, _a1) } - if rf, ok := ret.Get(0).(func(v1beta3.OrderID, nodetypesv1beta3.ResourceGroup) typesv1beta3.Reservation); ok { + if rf, ok := ret.Get(0).(func(v1beta3.OrderID, deploymentv1beta3.ResourceGroup) typesv1beta3.Reservation); ok { r0 = rf(_a0, _a1) } else { if ret.Get(0) != nil { @@ -286,7 +286,7 @@ func (_m *Service) Reserve(_a0 v1beta3.OrderID, _a1 nodetypesv1beta3.ResourceGro } } - if rf, ok := ret.Get(1).(func(v1beta3.OrderID, nodetypesv1beta3.ResourceGroup) error); ok { + if rf, ok := 
ret.Get(1).(func(v1beta3.OrderID, deploymentv1beta3.ResourceGroup) error); ok { r1 = rf(_a0, _a1) } else { r1 = ret.Error(1) @@ -302,14 +302,14 @@ type Service_Reserve_Call struct { // Reserve is a helper method to define mock.On call // - _a0 v1beta3.OrderID -// - _a1 nodetypesv1beta3.ResourceGroup +// - _a1 deploymentv1beta3.ResourceGroup func (_e *Service_Expecter) Reserve(_a0 interface{}, _a1 interface{}) *Service_Reserve_Call { return &Service_Reserve_Call{Call: _e.mock.On("Reserve", _a0, _a1)} } -func (_c *Service_Reserve_Call) Run(run func(_a0 v1beta3.OrderID, _a1 nodetypesv1beta3.ResourceGroup)) *Service_Reserve_Call { +func (_c *Service_Reserve_Call) Run(run func(_a0 v1beta3.OrderID, _a1 deploymentv1beta3.ResourceGroup)) *Service_Reserve_Call { _c.Call.Run(func(args mock.Arguments) { - run(args[0].(v1beta3.OrderID), args[1].(nodetypesv1beta3.ResourceGroup)) + run(args[0].(v1beta3.OrderID), args[1].(deploymentv1beta3.ResourceGroup)) }) return _c } @@ -319,7 +319,7 @@ func (_c *Service_Reserve_Call) Return(_a0 typesv1beta3.Reservation, _a1 error) return _c } -func (_c *Service_Reserve_Call) RunAndReturn(run func(v1beta3.OrderID, nodetypesv1beta3.ResourceGroup) (typesv1beta3.Reservation, error)) *Service_Reserve_Call { +func (_c *Service_Reserve_Call) RunAndReturn(run func(v1beta3.OrderID, deploymentv1beta3.ResourceGroup) (typesv1beta3.Reservation, error)) *Service_Reserve_Call { _c.Call.Return(run) return _c } @@ -466,13 +466,12 @@ func (_c *Service_Unreserve_Call) RunAndReturn(run func(v1beta3.OrderID) error) return _c } -type mockConstructorTestingTNewService interface { +// NewService creates a new instance of Service. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewService(t interface { mock.TestingT Cleanup(func()) -} - -// NewService creates a new instance of Service. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewService(t mockConstructorTestingTNewService) *Service { +}) *Service { mock := &Service{} mock.Mock.Test(t) diff --git a/cluster/reservation.go b/cluster/reservation.go index 14ff909a..8ad47f4b 100644 --- a/cluster/reservation.go +++ b/cluster/reservation.go @@ -1,6 +1,7 @@ package cluster import ( + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3" atypes "github.com/akash-network/akash-api/go/node/types/v1beta3" @@ -8,7 +9,7 @@ import ( "github.com/akash-network/provider/cluster/util" ) -func newReservation(order mtypes.OrderID, resources atypes.ResourceGroup) *reservation { +func newReservation(order mtypes.OrderID, resources dtypes.ResourceGroup) *reservation { return &reservation{ order: order, resources: resources, @@ -17,8 +18,8 @@ func newReservation(order mtypes.OrderID, resources atypes.ResourceGroup) *reser type reservation struct { order mtypes.OrderID - resources atypes.ResourceGroup - adjustedResources []atypes.Resources + resources dtypes.ResourceGroup + adjustedResources dtypes.ResourceUnits clusterParams interface{} endpointQuantity uint allocated bool @@ -31,11 +32,11 @@ func (r *reservation) OrderID() mtypes.OrderID { return r.order } -func (r *reservation) Resources() atypes.ResourceGroup { +func (r *reservation) Resources() dtypes.ResourceGroup { return r.resources } -func (r *reservation) SetAllocatedResources(val []atypes.Resources) { +func (r *reservation) SetAllocatedResources(val dtypes.ResourceUnits) { r.adjustedResources = val } diff --git a/cluster/service.go b/cluster/service.go index 0930cfd2..cbf2012c 100644 --- a/cluster/service.go +++ b/cluster/service.go @@ -3,6 +3,7 @@ package cluster import ( "context" + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" "github.com/boz/go-lifecycle" sdktypes 
"github.com/cosmos/cosmos-sdk/types" @@ -13,7 +14,6 @@ import ( "github.com/tendermint/tendermint/libs/log" mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3" - atypes "github.com/akash-network/akash-api/go/node/types/v1beta3" "github.com/akash-network/node/pubsub" "github.com/akash-network/provider/cluster/operatorclients" @@ -43,7 +43,7 @@ var ( // //go:generate mockery --name Cluster type Cluster interface { - Reserve(mtypes.OrderID, atypes.ResourceGroup) (ctypes.Reservation, error) + Reserve(mtypes.OrderID, dtypes.ResourceGroup) (ctypes.Reservation, error) Unreserve(mtypes.OrderID) error } @@ -215,7 +215,7 @@ func (s *service) Ready() <-chan struct{} { return s.inventory.ready() } -func (s *service) Reserve(order mtypes.OrderID, resources atypes.ResourceGroup) (ctypes.Reservation, error) { +func (s *service) Reserve(order mtypes.OrderID, resources dtypes.ResourceGroup) (ctypes.Reservation, error) { return s.inventory.reserve(order, resources) } diff --git a/cluster/types/v1beta3/reservation.go b/cluster/types/v1beta3/reservation.go index ea6a6cb8..d13958ee 100644 --- a/cluster/types/v1beta3/reservation.go +++ b/cluster/types/v1beta3/reservation.go @@ -1,14 +1,14 @@ package v1beta3 import ( + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3" - atypes "github.com/akash-network/akash-api/go/node/types/v1beta3" ) //go:generate mockery --name ReservationGroup --output ../../mocks type ReservationGroup interface { - Resources() atypes.ResourceGroup - SetAllocatedResources([]atypes.Resources) + Resources() dtypes.ResourceGroup + SetAllocatedResources(dtypes.ResourceUnits) SetClusterParams(interface{}) ClusterParams() interface{} } diff --git a/cluster/types/v1beta3/types.go b/cluster/types/v1beta3/types.go index 75e7a37d..e41f2fff 100644 --- a/cluster/types/v1beta3/types.go +++ b/cluster/types/v1beta3/types.go @@ -7,6 +7,7 @@ import ( "io" "strings" + dtypes 
"github.com/akash-network/akash-api/go/node/deployment/v1beta3" sdk "github.com/cosmos/cosmos-sdk/types" "github.com/pkg/errors" eventsv1 "k8s.io/api/events/v1" @@ -19,7 +20,8 @@ import ( var ( // ErrInsufficientCapacity is the new error when capacity is insufficient - ErrInsufficientCapacity = errors.New("insufficient capacity") + ErrInsufficientCapacity = errors.New("insufficient capacity") + ErrGroupResourceMismatch = errors.New("group resource mismatch") ) // Status stores current leases and inventory statuses @@ -123,25 +125,25 @@ func ParseStorageAttributes(attrs types.Attributes) (StorageAttributes, error) { return res, nil } -func (inv *InventoryMetricTotal) AddResources(res types.Resources) { +func (inv *InventoryMetricTotal) AddResources(res dtypes.ResourceUnit) { cpu := sdk.NewIntFromUint64(inv.CPU) gpu := sdk.NewIntFromUint64(inv.GPU) mem := sdk.NewIntFromUint64(inv.Memory) ephemeralStorage := sdk.NewIntFromUint64(inv.StorageEphemeral) - if res.Resources.CPU != nil { - cpu = cpu.Add(res.Resources.CPU.Units.Val.MulRaw(int64(res.Count))) + if res.CPU != nil { + cpu = cpu.Add(res.CPU.Units.Val.MulRaw(int64(res.Count))) } - if res.Resources.GPU != nil { - gpu = gpu.Add(res.Resources.GPU.Units.Val.MulRaw(int64(res.Count))) + if res.GPU != nil { + gpu = gpu.Add(res.GPU.Units.Val.MulRaw(int64(res.Count))) } - if res.Resources.Memory != nil { - mem = mem.Add(res.Resources.Memory.Quantity.Val.MulRaw(int64(res.Count))) + if res.Memory != nil { + mem = mem.Add(res.Memory.Quantity.Val.MulRaw(int64(res.Count))) } - for _, storage := range res.Resources.Storage { + for _, storage := range res.Storage { if storageClass, found := storage.Attributes.Find(sdl.StorageAttributeClass).AsString(); !found { ephemeralStorage = ephemeralStorage.Add(storage.Quantity.Val.MulRaw(int64(res.Count))) } else { diff --git a/cluster/util/endpoint_quantity.go b/cluster/util/endpoint_quantity.go index 5bf17d26..ffcd2f11 100644 --- a/cluster/util/endpoint_quantity.go +++ 
b/cluster/util/endpoint_quantity.go @@ -1,17 +1,20 @@ package util -import atypes "github.com/akash-network/akash-api/go/node/types/v1beta3" +import ( + dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" + atypes "github.com/akash-network/akash-api/go/node/types/v1beta3" +) -func GetEndpointQuantityOfResourceGroup(resources atypes.ResourceGroup, kind atypes.Endpoint_Kind) uint { +func GetEndpointQuantityOfResourceGroup(resources dtypes.ResourceGroup, kind atypes.Endpoint_Kind) uint { endpoints := make(map[uint32]struct{}) - for _, resource := range resources.GetResources() { + for _, resource := range resources.GetResourceUnits() { accumEndpointsOfResources(resource.Resources, kind, endpoints) } return uint(len(endpoints)) } -func accumEndpointsOfResources(r atypes.ResourceUnits, kind atypes.Endpoint_Kind, accum map[uint32]struct{}) { +func accumEndpointsOfResources(r atypes.Resources, kind atypes.Endpoint_Kind, accum map[uint32]struct{}) { for _, endpoint := range r.Endpoints { if endpoint.Kind == kind { accum[endpoint.SequenceNumber] = struct{}{} @@ -19,7 +22,7 @@ func accumEndpointsOfResources(r atypes.ResourceUnits, kind atypes.Endpoint_Kind } } -func GetEndpointQuantityOfResourceUnits(r atypes.ResourceUnits, kind atypes.Endpoint_Kind) uint { +func GetEndpointQuantityOfResourceUnits(r atypes.Resources, kind atypes.Endpoint_Kind) uint { endpoints := make(map[uint32]struct{}) accumEndpointsOfResources(r, kind, endpoints) diff --git a/disc.md b/disc.md new file mode 100644 index 00000000..e69de29b diff --git a/gateway/rest/integration_test.go b/gateway/rest/integration_test.go index 2c517ebc..1eee6619 100644 --- a/gateway/rest/integration_test.go +++ b/gateway/rest/integration_test.go @@ -160,13 +160,15 @@ func mockManifestGroups(m integrationMocks, leaseID mtypes.LeaseID) { Image: testImageName, Args: nil, Env: nil, - Resources: v2beta2.ResourceUnits{ + Resources: v2beta2.Resources{ CPU: 1000, Memory: "3333", - Storage: 
[]v2beta2.ResourceUnitsStorage{{ - Name: "", - Size: "4444", - }}, + Storage: []v2beta2.ResourcesStorage{ + { + Name: "", + Size: "4444", + }, + }, }, Count: 1, Expose: []v2beta2.ManifestServiceExpose{{ diff --git a/gateway/rest/router_test.go b/gateway/rest/router_test.go index 824359ef..ab0de0f2 100644 --- a/gateway/rest/router_test.go +++ b/gateway/rest/router_test.go @@ -627,13 +627,15 @@ func mockManifestGroupsForRouterTest(rt *routerTest, leaseID mtypes.LeaseID) { Image: testImageName, Args: nil, Env: nil, - Resources: v2beta2.ResourceUnits{ + Resources: v2beta2.Resources{ CPU: 1000, Memory: "3333", - Storage: []v2beta2.ResourceUnitsStorage{{ - Name: "", - Size: "4444", - }}, + Storage: []v2beta2.ResourcesStorage{ + { + Name: "", + Size: "4444", + }, + }, }, Count: 1, Expose: []v2beta2.ManifestServiceExpose{{ diff --git a/go.mod b/go.mod index bff67e1a..cb0e6546 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,8 @@ module github.com/akash-network/provider go 1.18 require ( - github.com/akash-network/akash-api v0.0.23 - github.com/akash-network/node v0.23.1-rc0 + github.com/akash-network/akash-api v0.0.24 + github.com/akash-network/node v0.23.2-rc1 github.com/avast/retry-go/v4 v4.3.4 github.com/boz/go-lifecycle v0.1.1 github.com/cosmos/cosmos-sdk v0.45.16 @@ -70,6 +70,7 @@ require ( github.com/ChainSafe/go-schnorrkel v0.0.0-20200405005733-88cbf1b4c40d // indirect github.com/DataDog/zstd v1.5.0 // indirect github.com/HdrHistogram/hdrhistogram-go v1.1.2 // indirect + github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 // indirect github.com/Workiva/go-datastructures v1.0.53 // indirect github.com/alessio/shellescape v1.4.1 // indirect github.com/armon/go-metrics v0.4.1 // indirect @@ -81,6 +82,7 @@ require ( github.com/cenkalti/backoff/v3 v3.2.2 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/cloudflare/circl v1.3.3 // indirect github.com/cockroachdb/errors v1.9.1 // indirect 
github.com/cockroachdb/logtags v0.0.0-20230118201751-21c54148d20b // indirect github.com/cockroachdb/pebble v0.0.0-20220817183557-09c6e030a677 // indirect @@ -128,10 +130,13 @@ require ( github.com/google/btree v1.1.2 // indirect github.com/google/gnostic v0.6.9 // indirect github.com/google/go-cmp v0.5.9 // indirect + github.com/google/go-github/v53 v53.2.0 // indirect + github.com/google/go-querystring v1.1.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/orderedcode v0.0.1 // indirect github.com/google/safetext v0.0.0-20220905092116-b49f7bc46da2 // indirect github.com/gorilla/handlers v1.5.1 // indirect + github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect github.com/gsterjov/go-libsecret v0.0.0-20161001094733-a6f4afe4910c // indirect @@ -231,10 +236,10 @@ require ( golang.org/x/crypto v0.7.0 // indirect golang.org/x/exp v0.0.0-20221019170559-20944726eadf // indirect golang.org/x/mod v0.9.0 // indirect - golang.org/x/net v0.9.0 // indirect - golang.org/x/oauth2 v0.7.0 // indirect - golang.org/x/sys v0.7.0 // indirect - golang.org/x/term v0.7.0 // indirect + golang.org/x/net v0.10.0 // indirect + golang.org/x/oauth2 v0.8.0 // indirect + golang.org/x/sys v0.8.0 // indirect + golang.org/x/term v0.8.0 // indirect golang.org/x/text v0.9.0 // indirect golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.6.0 // indirect diff --git a/go.sum b/go.sum index 4d67e598..89915f74 100644 --- a/go.sum +++ b/go.sum @@ -157,6 +157,8 @@ github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5 h1:TngWCqHvy9oXAN6lEV github.com/Nvveen/Gotty v0.0.0-20120604004816-cd527374f1e5/go.mod h1:lmUJ/7eu/Q8D7ML55dXQrVaamCz2vxCfdQBasLZfHKk= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= 
+github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8 h1:wPbRQzjjwFc0ih8puEVAOFGELsn1zoIIYdxvML7mDxA= +github.com/ProtonMail/go-crypto v0.0.0-20230217124315-7d5c6f04bbb8/go.mod h1:I0gYDMZ6Z5GRU7l58bNFSkPTFN6Yl12dsUlAZ8xy98g= github.com/PuerkitoBio/purell v1.0.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= @@ -181,16 +183,16 @@ github.com/afex/hystrix-go v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia github.com/agnivade/levenshtein v1.0.1/go.mod h1:CURSv5d9Uaml+FovSIICkLbAUZ9S4RqaHDIsdSBg7lM= github.com/ajg/form v1.5.1/go.mod h1:uL1WgH+h2mgNtvBq0339dVnzXdBETtL2LeUXaIv25UY= github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw= -github.com/akash-network/akash-api v0.0.23 h1:zsu5ARReF7h8pD5jlBci3RpyJaahcvwI56IvNpR9+gE= -github.com/akash-network/akash-api v0.0.23/go.mod h1:9/uYusyBcZecBQCgZWUbXRu0i1tyxj4/ze45XB2oLIU= +github.com/akash-network/akash-api v0.0.24 h1:nIuftXhNI6w5oLjfncbb2BLxLEZvkwfxknbjYuFBI5M= +github.com/akash-network/akash-api v0.0.24/go.mod h1:9/uYusyBcZecBQCgZWUbXRu0i1tyxj4/ze45XB2oLIU= github.com/akash-network/cometbft v0.34.27-akash h1:V1dApDOr8Ee7BJzYyQ7Z9VBtrAul4+baMeA6C49dje0= github.com/akash-network/cometbft v0.34.27-akash/go.mod h1:BcCbhKv7ieM0KEddnYXvQZR+pZykTKReJJYf7YC7qhw= github.com/akash-network/ledger-go v0.14.3 h1:LCEFkTfgGA2xFMN2CtiKvXKE7dh0QSM77PJHCpSkaAo= github.com/akash-network/ledger-go v0.14.3/go.mod h1:NfsjfFvno9Kaq6mfpsKz4sqjnAVVEsVsnBJfKB4ueAs= github.com/akash-network/ledger-go/cosmos v0.14.3 h1:bEI9jLHM+Lm55idi4RfJlDez4/rVJs7E1MT0U2whYqI= github.com/akash-network/ledger-go/cosmos v0.14.3/go.mod h1:SjAfheQTE4rWk0ir+wjbOWxwj8nc8E4AZ08NdsvYG24= -github.com/akash-network/node v0.23.1-rc0 h1:HzQeXHak39Qw25+6Bz69zoYziAM0YNrYkq4dcFHYFHM= 
-github.com/akash-network/node v0.23.1-rc0/go.mod h1:xjOyEXnXbWEBLO3b0MKFBenUs7hTrF1rGLyJylSn3v8= +github.com/akash-network/node v0.23.2-rc1 h1:Fz56MLR4YLyZnxbQ/HIou6OGEhwGknYt6tpEevRndDo= +github.com/akash-network/node v0.23.2-rc1/go.mod h1:hsqEvJ843hBAAKBs3b99cYEd7OOyJxLzlWHiED+ZPSI= github.com/alecthomas/participle/v2 v2.0.0-alpha7 h1:cK4vjj0VSgb3lN1nuKA5F7dw+1s1pWBe5bx7nNCnN+c= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= @@ -328,6 +330,9 @@ github.com/circonus-labs/circonusllhist v0.1.3 h1:TJH+oke8D16535+jHExHj4nQvzlZrj github.com/circonus-labs/circonusllhist v0.1.3/go.mod h1:kMXHVDlOchFAehlya5ePtbp5jckzBHf4XRpQvBOLI+I= github.com/clbanning/x2j v0.0.0-20191024224557-825249438eec/go.mod h1:jMjuTZXRI4dUb/I5gc9Hdhagfvm9+RyrPryS/auMzxE= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cloudflare/circl v1.1.0/go.mod h1:prBCrKB9DV4poKZY1l9zBXg2QJY7mvgRvtMxxK7fi4I= +github.com/cloudflare/circl v1.3.3 h1:fE/Qz0QdIGqeWfnwq0RE0R7MI51s0M2E4Ga9kq5AEMs= +github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cloudfoundry-community/go-cfclient v0.0.0-20190201205600-f136f9222381/go.mod h1:e5+USP2j8Le2M0Jo3qKPFnNhuo1wueU4nWHCXBOfQ14= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= @@ -799,10 +804,13 @@ github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-github v17.0.0+incompatible/go.mod 
h1:zLgOLi98H3fifZn+44m+umXrS52loVEgC2AApnigrVQ= +github.com/google/go-github/v53 v53.2.0 h1:wvz3FyF53v4BK+AsnvCmeNhf8AkTaeh2SoYu/XUvTtI= +github.com/google/go-github/v53 v53.2.0/go.mod h1:XhFRObz+m/l+UCm9b7KSIC3lT3NWSXGt7mOsAWEloao= github.com/google/go-metrics-stackdriver v0.2.0 h1:rbs2sxHAPn2OtUj9JdR/Gij1YKGl0BTVD0augB+HEjE= github.com/google/go-metrics-stackdriver v0.2.0/go.mod h1:KLcPyp3dWJAFD+yHisGlJSZktIsTjb50eB72U2YZ9K0= github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= +github.com/google/go-querystring v1.1.0/go.mod h1:Kcdr2DB4koayq7X8pmAG4sNG59So17icRSOU623lUBU= github.com/google/gofuzz v0.0.0-20170612174753-24818f796faf/go.mod h1:HP5RmnzzSNb993RKQDq4+1A4ia9nllfqcQFTQJedwGI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.1.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -866,6 +874,8 @@ github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad github.com/gotestyourself/gotestyourself v2.2.0+incompatible/go.mod h1:zZKM6oeNM8k+FRljX1mnzVYeS8wiGgQyvST1/GafPbY= github.com/graph-gophers/graphql-go v1.3.0/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc= github.com/gregjones/httpcache v0.0.0-20180305231024-9cad4c3443a7/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79 h1:+ngKgrYPPJrOjhax5N+uePQ0Fh1Z7PheYoUI/0nzkPA= +github.com/gregjones/httpcache v0.0.0-20190611155906-901d90724c79/go.mod h1:FecbI9+v66THATjSRHfNgh1IVFe/9kFxbXtjV0ctIMA= github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod 
h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= @@ -2019,8 +2029,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20211209124913-491a49abca63/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.9.0 h1:aWJ/m6xSmxWBx+V0XRHTlrYrPG56jKsLdTFmsSsCzOM= -golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns= +golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190130055435-99b60b757ec1/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -2041,8 +2051,8 @@ golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.7.0 h1:qe6s0zUXlPX80/dITx3440hWZ7GwMwgDDyrSGTPJG/g= -golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4= +golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= +golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -2165,14 +2175,14 @@ golang.org/x/sys v0.0.0-20220503163025-988cb79eb6c6/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.7.0 h1:3jlCCIQZPdOYu1h8BkNvLz8Kgwtae2cagcG/VamtZRU= -golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210615171337-6886f2dfbf5b/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.7.0 h1:BEvjmm5fURWqcfbSKTdpkDXYBrUS1c0m8agp14W48vQ= -golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY= +golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= golang.org/x/text v0.0.0-20160726164857-2910a502d2bf/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git 
a/manifest/manager.go b/manifest/manager.go index b19db728..9c3f1496 100644 --- a/manifest/manager.go +++ b/manifest/manager.go @@ -16,8 +16,6 @@ import ( dtypes "github.com/akash-network/akash-api/go/node/deployment/v1beta3" mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3" "github.com/akash-network/node/pubsub" - "github.com/akash-network/node/sdl" - sdlutil "github.com/akash-network/node/sdl/util" "github.com/akash-network/node/util/runner" clustertypes "github.com/akash-network/provider/cluster/types/v1beta3" @@ -384,7 +382,7 @@ func (m *manager) validateRequests() { default: } if err := m.validateRequest(req); err != nil { - m.log.Error("invalid manifest", "err", err) + m.log.Error("invalid manifest", "err", err.Error()) req.ch <- err continue } @@ -392,8 +390,8 @@ func (m *manager) validateRequests() { // The manifest has been grabbed from the request but not published yet, store this response m.pendingRequests = append(m.pendingRequests, req) - } + m.requests = nil // all requests processed at this time m.log.Debug("requests valid", "num-requests", len(manifests)) @@ -413,7 +411,7 @@ func (m *manager) validateRequest(req manifestRequest) error { // ensure that an uploaded manifest matches the hash declared on // the Akash Deployment.Version - version, err := sdl.ManifestVersion(req.value.Manifest) + version, err := req.value.Manifest.Version() if err != nil { return err } @@ -431,11 +429,7 @@ func (m *manager) validateRequest(req manifestRequest) error { return ErrManifestVersion } - if err = maniv2beta2.ValidateManifest(req.value.Manifest); err != nil { - return err - } - - if err = maniv2beta2.ValidateManifestWithDeployment(&req.value.Manifest, m.data.Groups); err != nil { + if err = req.value.Manifest.CheckAgainstDeployment(m.data.Groups); err != nil { return err } @@ -444,6 +438,7 @@ func (m *manager) validateRequest(req manifestRequest) error { for _, lease := range m.localLeases { groupNames = append(groupNames, 
lease.Group.GroupSpec.Name) } + // Check that hostnames are not in use if err = m.checkHostnamesForManifest(req.value.Manifest, groupNames); err != nil { return err @@ -475,8 +470,9 @@ func (m *manager) checkHostnamesForManifest(requestManifest maniv2beta2.Manifest // For each service that exposes via an Ingress, then require a hsotname for _, service := range mgroup.Services { for _, expose := range service.Expose { - if sdlutil.ShouldBeIngress(expose) && len(expose.Hosts) == 0 { - return fmt.Errorf("%w: service %q exposed on %d:%s must have a hostname", errManifestRejected, service.Name, sdlutil.ExposeExternalPort(expose), expose.Proto) + if expose.IsIngress() && len(expose.Hosts) == 0 { + return fmt.Errorf("%w: service %q exposed on %d:%s must have a hostname", + errManifestRejected, service.Name, expose.GetExternalPort(), expose.Proto) } } } diff --git a/manifest/manager_test.go b/manifest/manager_test.go index 5499b27c..3d0637ff 100644 --- a/manifest/manager_test.go +++ b/manifest/manager_test.go @@ -45,7 +45,7 @@ package manifest // if mani != nil { // m, err := mani.Manifest() // require.NoError(t, err) -// version, err = sdl.ManifestVersion(m) +// version, err = m.Version() // require.NoError(t, err) // require.NotNil(t, version) // } else { @@ -256,7 +256,7 @@ package manifest // lid.GSeq = 99999 // did := lid.DeploymentID() // -// version, err := sdl.ManifestVersion(sdlManifest) +// version, err := sdlManifest.Version() // require.NotNil(t, version) // require.NoError(t, err) // leases := []mtypes.Lease{{ @@ -309,7 +309,7 @@ package manifest // Group: dgroup, // Price: sdk.NewDecCoin("uakt", sdk.NewInt(111)), // } -// version, err := sdl.ManifestVersion(sdlManifest) +// version, err := sdlManifest.Version() // require.NotNil(t, version) // // require.NoError(t, err) @@ -367,7 +367,7 @@ package manifest // Group: dgroup, // Price: sdk.NewDecCoinFromDec(testutil.CoinDenom, sdk.NewDec(111)), // } -// version, err := sdl.ManifestVersion(sdlManifest) +// 
version, err := sdlManifest.Version() // require.NotNil(t, version) // require.NoError(t, err) // leases := []mtypes.Lease{{ @@ -396,7 +396,7 @@ package manifest // sdlManifest, err = sdl2NewContainer.Manifest() // require.NoError(t, err) // -// version, err = sdl.ManifestVersion(sdlManifest) +// version, err = sdlManifest.Version() // require.NoError(t, err) // // update := dtypes.EventDeploymentUpdated{ diff --git a/manifest/mocks/client.go b/manifest/mocks/client.go index 07973096..961f88eb 100644 --- a/manifest/mocks/client.go +++ b/manifest/mocks/client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -122,13 +122,12 @@ func (_c *Client_Submit_Call) RunAndReturn(run func(context.Context, v1beta3.Dep return _c } -type mockConstructorTestingTNewClient interface { +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewClient(t mockConstructorTestingTNewClient) *Client { +}) *Client { mock := &Client{} mock.Mock.Test(t) diff --git a/manifest/mocks/status_client.go b/manifest/mocks/status_client.go index 6b1ef146..2d4606c6 100644 --- a/manifest/mocks/status_client.go +++ b/manifest/mocks/status_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -76,13 +76,12 @@ func (_c *StatusClient_Status_Call) RunAndReturn(run func(context.Context) (*man return _c } -type mockConstructorTestingTNewStatusClient interface { +// NewStatusClient creates a new instance of StatusClient.
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStatusClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewStatusClient creates a new instance of StatusClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewStatusClient(t mockConstructorTestingTNewStatusClient) *StatusClient { +}) *StatusClient { mock := &StatusClient{} mock.Mock.Test(t) diff --git a/mocks/client.go b/mocks/client.go index 6e263753..7125b63f 100644 --- a/mocks/client.go +++ b/mocks/client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. package mocks @@ -313,13 +313,12 @@ func (_c *Client_Validate_Call) RunAndReturn(run func(context.Context, types.Add return _c } -type mockConstructorTestingTNewClient interface { +// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewClient creates a new instance of Client. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewClient(t mockConstructorTestingTNewClient) *Client { +}) *Client { mock := &Client{} mock.Mock.Test(t) diff --git a/mocks/status_client.go b/mocks/status_client.go index 5d8e7cd4..5b470185 100644 --- a/mocks/status_client.go +++ b/mocks/status_client.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.24.0. DO NOT EDIT. +// Code generated by mockery v2.32.0. DO NOT EDIT. 
package mocks @@ -76,13 +76,12 @@ func (_c *StatusClient_Status_Call) RunAndReturn(run func(context.Context) (*pro return _c } -type mockConstructorTestingTNewStatusClient interface { +// NewStatusClient creates a new instance of StatusClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewStatusClient(t interface { mock.TestingT Cleanup(func()) -} - -// NewStatusClient creates a new instance of StatusClient. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. -func NewStatusClient(t mockConstructorTestingTNewStatusClient) *StatusClient { +}) *StatusClient { mock := &StatusClient{} mock.Mock.Test(t) diff --git a/operator/hostnameoperator/hostname_operator.go b/operator/hostnameoperator/hostname_operator.go index f0b6eee3..46721d02 100644 --- a/operator/hostnameoperator/hostname_operator.go +++ b/operator/hostnameoperator/hostname_operator.go @@ -12,8 +12,6 @@ import ( manifest "github.com/akash-network/akash-api/go/manifest/v2beta2" mtypes "github.com/akash-network/akash-api/go/node/market/v1beta3" - sdlutil "github.com/akash-network/node/sdl/util" - "github.com/spf13/cobra" "github.com/spf13/viper" "github.com/tendermint/tendermint/libs/log" @@ -334,10 +332,12 @@ func locateServiceFromManifest(ctx context.Context, client cluster.Client, lease continue } - if externalPort == uint32(sdlutil.ExposeExternalPort(manifest.ServiceExpose{ + mse := manifest.ServiceExpose{ Port: uint32(expose.Port), ExternalPort: uint32(expose.ExternalPort), - })) { + } + + if externalPort == uint32(mse.GetExternalPort()) { selectedExpose = expose break } diff --git a/pkg/apis/akash.network/crd.yaml b/pkg/apis/akash.network/crd.yaml index 38693f3c..296d71a8 100644 --- a/pkg/apis/akash.network/crd.yaml +++ b/pkg/apis/akash.network/crd.yaml @@ -65,9 +65,12 @@ spec: type: array items: type: string - unit: + 
resources: type: object properties: + id: + type: number + format: uint32 cpu: type: number format: uint32 @@ -166,27 +169,6 @@ spec: type: string model: type: string -# affinity: -# nullable: true -# type: object -# properties: -# node: -# nullable: true -# type: object -# properties: -# required: -# type: array -# items: -# type: object -# properties: -# key: -# type: string -# operator: -# type: string -# values: -# type: array -# items: -# type: string - name: v2beta1 served: true storage: false diff --git a/pkg/apis/akash.network/v2beta2/k8s_integration_test.go b/pkg/apis/akash.network/v2beta2/k8s_integration_test.go index f4048f97..0be8d683 100644 --- a/pkg/apis/akash.network/v2beta2/k8s_integration_test.go +++ b/pkg/apis/akash.network/v2beta2/k8s_integration_test.go @@ -43,7 +43,6 @@ func TestWriteRead(t *testing.T) { SchedulerParams: make([]*crd.SchedulerParams, len(group.Services)), } kmani, err := crd.NewManifest(ns, lid, &group, csettings) - require.NoError(t, err, spec.Name) // save to k8s @@ -89,7 +88,6 @@ func withNamespace(ctx context.Context, t testing.TB, fn func(*rest.Config, stri // invoke callback fn(kcfg, nsname) - } func kubeConfig(t testing.TB) *rest.Config { diff --git a/pkg/apis/akash.network/v2beta2/manifest.go b/pkg/apis/akash.network/v2beta2/manifest.go index b72d549c..343d76e5 100644 --- a/pkg/apis/akash.network/v2beta2/manifest.go +++ b/pkg/apis/akash.network/v2beta2/manifest.go @@ -42,7 +42,7 @@ type ManifestService struct { Env []string `json:"env,omitempty"` // Resource requirements // in current version of CRD it is named as unit - Resources ResourceUnits `json:"unit"` + Resources Resources `json:"resources"` // Number of instances Count uint32 `json:"count,omitempty"` // Overlay Network Links @@ -82,14 +82,6 @@ type ManifestServiceParams struct { Storage []ManifestStorageParams `json:"storage,omitempty"` } -// type NodeAffinity struct { -// Required []corev1.NodeSelectorRequirement `json:"required"` -// } -// -// type Affinity 
struct { -// Node *NodeAffinity `json:"node"` -// } - type SchedulerResourceGPU struct { Vendor string `json:"vendor"` Model string `json:"model"` @@ -102,7 +94,6 @@ type SchedulerResources struct { type SchedulerParams struct { RuntimeClass string `json:"runtime_class"` Resources *SchedulerResources `json:"resources,omitempty"` - // Affinity *Affinity `json:"affinity"` } type ClusterSettings struct { diff --git a/pkg/apis/akash.network/v2beta2/types.go b/pkg/apis/akash.network/v2beta2/types.go index 1775dfbb..21cce7d6 100644 --- a/pkg/apis/akash.network/v2beta2/types.go +++ b/pkg/apis/akash.network/v2beta2/types.go @@ -86,30 +86,31 @@ func LeaseIDFromAkash(id mtypes.LeaseID) LeaseID { } } -type ResourceUnitsStorage struct { +type ResourcesStorage struct { Name string `json:"name"` Size string `json:"size"` } -// ResourceUnits stores cpu, memory and storage details -type ResourceUnits struct { - CPU uint32 `json:"cpu,omitempty"` - Memory string `json:"memory,omitempty"` - Storage []ResourceUnitsStorage `json:"storage,omitempty"` - GPU uint32 `json:"gpu,omitempty"` +// Resources stores cpu, memory and storage details +type Resources struct { + ID uint32 `json:"id"` + CPU uint32 `json:"cpu"` + GPU uint32 `json:"gpu"` + Memory string `json:"memory"` + Storage []ResourcesStorage `json:"storage,omitempty"` } -func (ru ResourceUnits) fromCRD() (types.ResourceUnits, error) { +func (ru Resources) fromCRD() (types.Resources, error) { memory, err := strconv.ParseUint(ru.Memory, 10, 64) if err != nil { - return types.ResourceUnits{}, err + return types.Resources{}, err } storage := make([]types.Storage, 0, len(ru.Storage)) for _, st := range ru.Storage { size, err := strconv.ParseUint(st.Size, 10, 64) if err != nil { - return types.ResourceUnits{}, err + return types.Resources{}, err } storage = append(storage, types.Storage{ @@ -118,7 +119,8 @@ func (ru ResourceUnits) fromCRD() (types.ResourceUnits, error) { }) } - return types.ResourceUnits{ + return types.Resources{ + ID: 
ru.ID, CPU: &types.CPU{ Units: types.NewResourceValue(uint64(ru.CPU)), }, @@ -132,14 +134,18 @@ func (ru ResourceUnits) fromCRD() (types.ResourceUnits, error) { }, nil } -func resourceUnitsFromAkash(aru types.ResourceUnits) (ResourceUnits, error) { - res := ResourceUnits{} +func resourceUnitsFromAkash(aru types.Resources) (Resources, error) { + res := Resources{ + ID: aru.ID, + } + if aru.CPU != nil { if aru.CPU.Units.Value() > math.MaxUint32 { - return ResourceUnits{}, errors.New("k8s api: cpu units value overflows uint32") + return Resources{}, errors.New("k8s api: cpu units value overflows uint32") } res.CPU = uint32(aru.CPU.Units.Value()) } + if aru.Memory != nil { res.Memory = strconv.FormatUint(aru.Memory.Quantity.Value(), 10) } @@ -147,14 +153,14 @@ func resourceUnitsFromAkash(aru types.ResourceUnits) (ResourceUnits, error) { if aru.GPU != nil { // todo boundary check if aru.GPU.Units.Value() > math.MaxUint32 { - return ResourceUnits{}, errors.New("k8s api: gpu units value overflows uint32") + return Resources{}, errors.New("k8s api: gpu units value overflows uint32") } res.GPU = uint32(aru.GPU.Units.Value()) } - res.Storage = make([]ResourceUnitsStorage, 0, len(aru.Storage)) + res.Storage = make([]ResourcesStorage, 0, len(aru.Storage)) for _, storage := range aru.Storage { - res.Storage = append(res.Storage, ResourceUnitsStorage{ + res.Storage = append(res.Storage, ResourcesStorage{ Name: storage.Name, Size: strconv.FormatUint(storage.Quantity.Value(), 10), }) diff --git a/pkg/apis/akash.network/v2beta2/zz_generated.deepcopy.go b/pkg/apis/akash.network/v2beta2/zz_generated.deepcopy.go index f39f0aef..79bebe9d 100644 --- a/pkg/apis/akash.network/v2beta2/zz_generated.deepcopy.go +++ b/pkg/apis/akash.network/v2beta2/zz_generated.deepcopy.go @@ -766,38 +766,38 @@ func (in *ResourcePair) DeepCopy() *ResourcePair { } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
-func (in *ResourceUnits) DeepCopyInto(out *ResourceUnits) { +func (in *Resources) DeepCopyInto(out *Resources) { *out = *in if in.Storage != nil { in, out := &in.Storage, &out.Storage - *out = make([]ResourceUnitsStorage, len(*in)) + *out = make([]ResourcesStorage, len(*in)) copy(*out, *in) } return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceUnits. -func (in *ResourceUnits) DeepCopy() *ResourceUnits { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new Resources. +func (in *Resources) DeepCopy() *Resources { if in == nil { return nil } - out := new(ResourceUnits) + out := new(Resources) in.DeepCopyInto(out) return out } // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. -func (in *ResourceUnitsStorage) DeepCopyInto(out *ResourceUnitsStorage) { +func (in *ResourcesStorage) DeepCopyInto(out *ResourcesStorage) { *out = *in return } -// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceUnitsStorage. -func (in *ResourceUnitsStorage) DeepCopy() *ResourceUnitsStorage { +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourcesStorage. 
+func (in *ResourcesStorage) DeepCopy() *ResourcesStorage { if in == nil { return nil } - out := new(ResourceUnitsStorage) + out := new(ResourcesStorage) in.DeepCopyInto(out) return out } diff --git a/service.go b/service.go index f2e62465..3aa0bc42 100644 --- a/service.go +++ b/service.go @@ -3,7 +3,6 @@ package provider import ( "context" - atypes "github.com/akash-network/akash-api/go/node/types/v1beta3" "github.com/boz/go-lifecycle" "github.com/pkg/errors" @@ -268,18 +267,18 @@ func (s *service) run() { } type reservation struct { - resources atypes.ResourceGroup - adjustedResources []atypes.Resources + resources dtypes.ResourceGroup + adjustedResources dtypes.ResourceUnits clusterParams interface{} } var _ ctypes.ReservationGroup = (*reservation)(nil) -func (r *reservation) Resources() atypes.ResourceGroup { +func (r *reservation) Resources() dtypes.ResourceGroup { return r.resources } -func (r *reservation) SetAllocatedResources(val []atypes.Resources) { +func (r *reservation) SetAllocatedResources(val dtypes.ResourceUnits) { r.adjustedResources = val } diff --git a/testdata/deployment/deployment-v2-storage-default.yaml b/testdata/deployment/deployment-v2-storage-default.yaml index de9fcd3b..84b5b513 100644 --- a/testdata/deployment/deployment-v2-storage-default.yaml +++ b/testdata/deployment/deployment-v2-storage-default.yaml @@ -14,6 +14,15 @@ services: storage: data: mount: /var/lib/e2e-test + bew: + image: ovrclk/e2e-test + expose: + - port: 8080 + as: 81 + to: + - global: true + accept: + - webdistest2.localhost profiles: compute: web: @@ -28,14 +37,29 @@ profiles: size: "128Mi" attributes: persistent: "true" + bew: + resources: + cpu: + units: "0.01" + memory: + size: "128Mi" + storage: + - size: "512Mi" placement: global: pricing: web: denom: uakt amount: 10 + bew: + denom: uakt + amount: 10 deployment: web: global: profile: web count: 1 + bew: + global: + profile: bew + count: 1 diff --git a/testutil/manifest/v2beta2/manifest_app.go 
b/testutil/manifest/v2beta2/manifest_app.go index 22365fef..c16ea7d5 100644 --- a/testutil/manifest/v2beta2/manifest_app.go +++ b/testutil/manifest/v2beta2/manifest_app.go @@ -36,7 +36,8 @@ func (mg manifestGeneratorApp) Service(t testing.TB) manifest.Service { return manifest.Service{ Name: "demo", Image: "ropes/akash-app:v1", - Resources: types.ResourceUnits{ + Resources: types.Resources{ + ID: 1, CPU: &types.CPU{ Units: types.NewResourceValue(100), }, diff --git a/testutil/manifest/v2beta2/manifest_overflow.go b/testutil/manifest/v2beta2/manifest_overflow.go index 2b03904d..34990229 100644 --- a/testutil/manifest/v2beta2/manifest_overflow.go +++ b/testutil/manifest/v2beta2/manifest_overflow.go @@ -38,7 +38,8 @@ func (mg manifestGeneratorOverflow) Service(t testing.TB) manifest.Service { Image: "quay.io/ovrclk/demo-app", Args: []string{"run"}, Env: []string{"AKASH_TEST_SERVICE=true"}, - Resources: types.ResourceUnits{ + Resources: types.Resources{ + ID: 1, CPU: &types.CPU{ Units: types.NewResourceValue(math.MaxUint32), },