diff --git a/cmd/metal-api/internal/datastore/machine.go b/cmd/metal-api/internal/datastore/machine.go index dcc32a2d2..4975ad037 100644 --- a/cmd/metal-api/internal/datastore/machine.go +++ b/cmd/metal-api/internal/datastore/machine.go @@ -428,12 +428,12 @@ func (rs *RethinkStore) UpdateMachine(oldMachine *metal.Machine, newMachine *met // FindWaitingMachine returns an available, not allocated, waiting and alive machine of given size within the given partition. // TODO: the algorithm can be optimized / shortened by using a rethinkdb join command and then using .Sample(1) // but current implementation should have a slightly better readability. -func (rs *RethinkStore) FindWaitingMachine(projectid, partitionid, sizeid string, placementTags []string) (*metal.Machine, error) { +func (rs *RethinkStore) FindWaitingMachine(projectid, partitionid string, size metal.Size, placementTags []string) (*metal.Machine, error) { q := *rs.machineTable() q = q.Filter(map[string]interface{}{ "allocation": nil, "partitionid": partitionid, - "sizeid": sizeid, + "sizeid": size.ID, "state": map[string]string{ "value": string(metal.AvailableState), }, @@ -467,21 +467,25 @@ func (rs *RethinkStore) FindWaitingMachine(projectid, partitionid, sizeid string available = append(available, m) } - if available == nil || len(available) < 1 { + if len(available) == 0 { return nil, errors.New("no machine available") } - query := MachineSearchQuery{ - AllocationProject: &projectid, - PartitionID: &partitionid, - } - - var projectMachines metal.Machines - err = rs.SearchMachines(&query, &projectMachines) + var partitionMachines metal.Machines + err = rs.SearchMachines(&MachineSearchQuery{ + PartitionID: &partitionid, + }, &partitionMachines) if err != nil { return nil, err } + ok := checkSizeReservations(available, projectid, partitionid, partitionMachines.WithSize(size.ID).ByProjectID(), size) + if !ok { + return nil, errors.New("no machine available") + } + + projectMachines := partitionMachines.ByProjectID()[projectid] + spreadCandidates := spreadAcrossRacks(available, projectMachines, placementTags) if len(spreadCandidates) == 0 { return nil, errors.New("no machine available") @@ -499,6 +503,37 @@ func (rs *RethinkStore) FindWaitingMachine(projectid, partitionid, sizeid string return &newMachine, nil } +// checkSizeReservations returns true when an allocation is possible and +// false when size reservations prevent the allocation for the given project in the given partition +func checkSizeReservations(available metal.Machines, projectid, partitionid string, machinesByProject map[string]metal.Machines, size metal.Size) bool { + if size.Reservations == nil { + return true + } + + var ( + reservations = 0 + ) + + for _, r := range size.Reservations.ForPartition(partitionid) { + r := r + + // sum up the amount of reservations + reservations += r.Amount + + alreadyAllocated := len(machinesByProject[r.ProjectID]) + + if projectid == r.ProjectID && alreadyAllocated < r.Amount { + // allow allocation for the project when it has a reservation and there are still allocations left + return true + } + + // subtract already used up reservations of the project + reservations = max(reservations-alreadyAllocated, 0) + } + + return reservations < len(available) +} + func spreadAcrossRacks(allMachines, projectMachines metal.Machines, tags []string) metal.Machines { var ( allRacks = groupByRack(allMachines) diff --git a/cmd/metal-api/internal/datastore/machine_test.go b/cmd/metal-api/internal/datastore/machine_test.go index
f030cb684..09bdd31be 100644 --- a/cmd/metal-api/internal/datastore/machine_test.go +++ b/cmd/metal-api/internal/datastore/machine_test.go @@ -9,6 +9,7 @@ import ( "github.com/google/go-cmp/cmp" "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" "github.com/metal-stack/metal-api/cmd/metal-api/internal/testdata" + "github.com/stretchr/testify/require" "golang.org/x/exp/slices" ) @@ -718,3 +719,151 @@ func getTestMachines(numPerRack int, rackids []string, tags []string) metal.Mach return machines } + +func Test_checkSizeReservations(t *testing.T) { + var ( + available = metal.Machines{ + {Base: metal.Base{ID: "1"}}, + {Base: metal.Base{ID: "2"}}, + {Base: metal.Base{ID: "3"}}, + {Base: metal.Base{ID: "4"}}, + {Base: metal.Base{ID: "5"}}, + } + + partitionA = "a" + p0 = "0" + p1 = "1" + p2 = "2" + + size = metal.Size{ + Base: metal.Base{ + ID: "c1-xlarge-x86", + }, + Reservations: metal.Reservations{ + { + Amount: 1, + ProjectID: p1, + PartitionIDs: []string{partitionA}, + }, + { + Amount: 2, + ProjectID: p2, + PartitionIDs: []string{partitionA}, + }, + }, + } + + projectMachines = map[string]metal.Machines{} + + allocate = func(id, project string) { + available = slices.DeleteFunc(available, func(m metal.Machine) bool { + return m.ID == id + }) + projectMachines[project] = append(projectMachines[project], metal.Machine{Base: metal.Base{ID: id}}) + } + ) + + // 5 available, 3 reserved, project 0 can allocate + ok := checkSizeReservations(available, p0, partitionA, projectMachines, size) + require.True(t, ok) + allocate(available[0].ID, p0) + + require.Equal(t, metal.Machines{ + {Base: metal.Base{ID: "2"}}, + {Base: metal.Base{ID: "3"}}, + {Base: metal.Base{ID: "4"}}, + {Base: metal.Base{ID: "5"}}, + }, available) + require.Equal(t, map[string]metal.Machines{ + p0: { + {Base: metal.Base{ID: "1"}}, + }, + }, projectMachines) + + // 4 available, 3 reserved, project 2 can allocate + ok = checkSizeReservations(available, p2, partitionA, projectMachines, size) + require.True(t, ok) + allocate(available[0].ID, p2) + + require.Equal(t, metal.Machines{ + {Base: metal.Base{ID: "3"}}, + {Base: metal.Base{ID: "4"}}, + {Base: metal.Base{ID: "5"}}, + }, available) + require.Equal(t, map[string]metal.Machines{ + p0: { + {Base: metal.Base{ID: "1"}}, + }, + p2: { + {Base: metal.Base{ID: "2"}}, + }, + }, projectMachines) + + // 3 available, 3 reserved (1 used), project 0 can allocate + ok = checkSizeReservations(available, p0, partitionA, projectMachines, size) + require.True(t, ok) + allocate(available[0].ID, p0) + + require.Equal(t, metal.Machines{ + {Base: metal.Base{ID: "4"}}, + {Base: metal.Base{ID: "5"}}, + }, available) + require.Equal(t, map[string]metal.Machines{ + p0: { + {Base: metal.Base{ID: "1"}}, + {Base: metal.Base{ID: "3"}}, + }, + p2: { + {Base: metal.Base{ID: "2"}}, + }, + }, projectMachines) + + // 2 available, 3 reserved (1 used), project 0 cannot allocate anymore + ok = checkSizeReservations(available, p0, partitionA, projectMachines, size) + require.False(t, ok) + + // 2 available, 3 reserved (1 used), project 2 can allocate + ok = checkSizeReservations(available, p2, partitionA, projectMachines, size) + require.True(t, ok) + allocate(available[0].ID, p2) + + require.Equal(t, metal.Machines{ + {Base: metal.Base{ID: "5"}}, + }, available) + require.Equal(t, map[string]metal.Machines{ + p0: { + {Base: metal.Base{ID: "1"}}, + {Base: metal.Base{ID: "3"}}, + }, + p2: { + {Base: metal.Base{ID: "2"}}, + {Base: metal.Base{ID: "4"}}, + }, + }, projectMachines) + + // 1 available, 3 
reserved (2 used), project 0 and 2 cannot allocate anymore + ok = checkSizeReservations(available, p0, partitionA, projectMachines, size) + require.False(t, ok) + ok = checkSizeReservations(available, p2, partitionA, projectMachines, size) + require.False(t, ok) + + // 1 available, 3 reserved (2 used), project 1 can allocate + ok = checkSizeReservations(available, p1, partitionA, projectMachines, size) + require.True(t, ok) + allocate(available[0].ID, p1) + + require.Equal(t, metal.Machines{}, available) + require.Equal(t, map[string]metal.Machines{ + p0: { + {Base: metal.Base{ID: "1"}}, + {Base: metal.Base{ID: "3"}}, + }, + p1: { + {Base: metal.Base{ID: "5"}}, + }, + p2: { + {Base: metal.Base{ID: "2"}}, + {Base: metal.Base{ID: "4"}}, + }, + }, projectMachines) +} diff --git a/cmd/metal-api/internal/grpc/boot-service.go b/cmd/metal-api/internal/grpc/boot-service.go index 4f35c4aed..b8d99025c 100644 --- a/cmd/metal-api/internal/grpc/boot-service.go +++ b/cmd/metal-api/internal/grpc/boot-service.go @@ -133,7 +133,7 @@ func (b *BootService) Register(ctx context.Context, req *v1.BootServiceRegisterR size, _, err := b.ds.FromHardware(machineHardware) if err != nil { - size = metal.UnknownSize + size = metal.UnknownSize() b.log.Errorw("no size found for hardware, defaulting to unknown size", "hardware", machineHardware, "error", err) } diff --git a/cmd/metal-api/internal/grpc/boot-service_test.go b/cmd/metal-api/internal/grpc/boot-service_test.go index c15954b10..a112e811f 100644 --- a/cmd/metal-api/internal/grpc/boot-service_test.go +++ b/cmd/metal-api/internal/grpc/boot-service_test.go @@ -90,7 +90,7 @@ func TestBootService_Register(t *testing.T) { neighbormac2: testdata.Switch2.Nics[0].MacAddress, numcores: 2, memory: 100, - expectedSizeId: metal.UnknownSize.ID, + expectedSizeId: metal.UnknownSize().ID, }, } @@ -109,7 +109,7 @@ func TestBootService_Register(t *testing.T) { Conflict: "replace", })).Return(testdata.EmptyResult, nil) } - mock.On(r.DB("mockdb").Table("size").Get(metal.UnknownSize.ID)).Return([]metal.Size{*metal.UnknownSize}, nil) + mock.On(r.DB("mockdb").Table("size").Get(metal.UnknownSize().ID)).Return([]metal.Size{*metal.UnknownSize()}, nil) mock.On(r.DB("mockdb").Table("switch").Filter(r.MockAnything(), r.FilterOpts{})).Return([]metal.Switch{testdata.Switch1, testdata.Switch2}, nil) mock.On(r.DB("mockdb").Table("event").Filter(r.MockAnything(), r.FilterOpts{})).Return([]metal.ProvisioningEventContainer{}, nil) mock.On(r.DB("mockdb").Table("event").Insert(r.MockAnything(), r.InsertOpts{})).Return(testdata.EmptyResult, nil) diff --git a/cmd/metal-api/internal/metal/machine.go b/cmd/metal-api/internal/metal/machine.go index 0195ec646..ecd246604 100644 --- a/cmd/metal-api/internal/metal/machine.go +++ b/cmd/metal-api/internal/metal/machine.go @@ -171,6 +171,22 @@ func (ms Machines) ByProjectID() map[string]Machines { return res } +func (ms Machines) WithSize(id string) Machines { + var res Machines + + for _, m := range ms { + m := m + + if m.SizeID != id { + continue + } + + res = append(res, m) + } + + return res +} + // MachineNetwork stores the Network details of the machine type MachineNetwork struct { NetworkID string `rethinkdb:"networkid" json:"networkid"` diff --git a/cmd/metal-api/internal/metal/size.go b/cmd/metal-api/internal/metal/size.go index 67d996ed9..8d0f3852a 100644 --- a/cmd/metal-api/internal/metal/size.go +++ b/cmd/metal-api/internal/metal/size.go @@ -2,22 +2,29 @@ package metal import ( "fmt" -) + "slices" -// UnknownSize is the size to use, when someone 
requires a size we do not know. -var UnknownSize = &Size{ - Base: Base{ - ID: "unknown", - Name: "unknown", - }, -} + mdmv1 "github.com/metal-stack/masterdata-api/api/v1" +) // A Size represents a supported machine size. type Size struct { Base - Constraints []Constraint `rethinkdb:"constraints" json:"constraints"` + Constraints []Constraint `rethinkdb:"constraints" json:"constraints"` + Reservations Reservations `rethinkdb:"reservations" json:"reservations"` + Labels map[string]string `rethinkdb:"labels" json:"labels"` +} + +// Reservation defines a reservation of a size for machine allocations +type Reservation struct { + Amount int `rethinkdb:"amount" json:"amount"` + Description string `rethinkdb:"description" json:"description"` + ProjectID string `rethinkdb:"projectid" json:"projectid"` + PartitionIDs []string `rethinkdb:"partitionids" json:"partitionids"` } +type Reservations []Reservation + // ConstraintType ... type ConstraintType string @@ -51,6 +58,16 @@ func (sz Sizes) ByID() SizeMap { return res } +// UnknownSize is the size to use, when someone requires a size we do not know. +func UnknownSize() *Size { + return &Size{ + Base: Base{ + ID: "unknown", + Name: "unknown", + }, + } +} + // Matches returns true if the given machine hardware is inside the min/max values of the // constraint. func (c *Constraint) Matches(hw MachineHardware) (ConstraintMatchingLog, bool) { @@ -118,12 +135,17 @@ func (s *Size) overlaps(so *Size) bool { } // Validate a size, returns error if a invalid size is passed -func (s *Size) Validate() error { +func (s *Size) Validate(partitions PartitionMap, projects map[string]*mdmv1.Project) error { for _, c := range s.Constraints { if c.Max < c.Min { return fmt.Errorf("size:%q type:%q max:%d is smaller than min:%d", s.ID, c.Type, c.Max, c.Min) } } + + if err := s.Reservations.Validate(partitions, projects); err != nil { + return fmt.Errorf("invalid size reservation: %w", err) + } + return nil } @@ -138,6 +160,73 @@ func (s *Size) Overlaps(ss *Sizes) *Size { return nil } +func (rs *Reservations) ForPartition(partitionID string) Reservations { + if rs == nil { + return nil + } + + var result Reservations + for _, r := range *rs { + r := r + if slices.Contains(r.PartitionIDs, partitionID) { + result = append(result, r) + } + } + + return result +} + +func (rs *Reservations) ForProject(projectID string) Reservations { + if rs == nil { + return nil + } + + var result Reservations + for _, r := range *rs { + r := r + if r.ProjectID == projectID { + result = append(result, r) + } + } + + return result +} + +func (rs *Reservations) Validate(partitions PartitionMap, projects map[string]*mdmv1.Project) error { + if rs == nil { + return nil + } + + for _, r := range *rs { + if r.Amount <= 0 { + return fmt.Errorf("amount must be a positive integer") + } + + if len(r.PartitionIDs) == 0 { + return fmt.Errorf("at least one partition id must be specified") + } + ids := map[string]bool{} + for _, partition := range r.PartitionIDs { + ids[partition] = true + if _, ok := partitions[partition]; !ok { + return fmt.Errorf("partition must exist before creating a size reservation") + } + } + if len(ids) != len(r.PartitionIDs) { + return fmt.Errorf("partitions must not contain duplicates") + } + + if r.ProjectID == "" { + return fmt.Errorf("project id must be specified") + } + if _, ok := projects[r.ProjectID]; !ok { + return fmt.Errorf("project must exist before creating a size reservation") + } + } + + return nil +} + // A ConstraintMatchingLog is used do return a log message to 
the caller // beside the contraint itself. type ConstraintMatchingLog struct { diff --git a/cmd/metal-api/internal/metal/size_test.go b/cmd/metal-api/internal/metal/size_test.go index 72303d571..113799420 100644 --- a/cmd/metal-api/internal/metal/size_test.go +++ b/cmd/metal-api/internal/metal/size_test.go @@ -1,10 +1,14 @@ package metal import ( + "fmt" "reflect" "testing" + "github.com/google/go-cmp/cmp" + mdmv1 "github.com/metal-stack/masterdata-api/api/v1" "github.com/metal-stack/metal-lib/pkg/pointer" + "github.com/metal-stack/metal-lib/pkg/testcommon" "github.com/stretchr/testify/require" ) @@ -541,7 +545,7 @@ func TestSizes_Overlaps(t *testing.T) { for i := range tests { tt := tests[i] t.Run(tt.name, func(t *testing.T) { - err := tt.sz.Validate() + err := tt.sz.Validate(nil, nil) require.NoError(t, err) got := tt.sz.Overlaps(&tt.args.sizes) if !reflect.DeepEqual(got, tt.want) { @@ -608,7 +612,7 @@ func TestSize_Validate(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := tt.size.Validate() + err := tt.size.Validate(nil, nil) if err != nil { require.EqualError(t, err, *tt.wantErrMessage) } @@ -618,3 +622,272 @@ func TestSize_Validate(t *testing.T) { }) } } + +func TestReservations_ForPartition(t *testing.T) { + tests := []struct { + name string + rs *Reservations + partitionID string + want Reservations + }{ + { + name: "nil", + rs: nil, + partitionID: "a", + want: nil, + }, + { + name: "correctly filtered", + rs: &Reservations{ + { + PartitionIDs: []string{"a", "b"}, + }, + { + PartitionIDs: []string{"c"}, + }, + { + PartitionIDs: []string{"a"}, + }, + }, + partitionID: "a", + want: Reservations{ + { + PartitionIDs: []string{"a", "b"}, + }, + { + PartitionIDs: []string{"a"}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.rs.ForPartition(tt.partitionID); !reflect.DeepEqual(got, tt.want) { + t.Errorf("Reservations.ForPartition() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestReservations_ForProject(t *testing.T) { + tests := []struct { + name string + rs *Reservations + projectID string + want Reservations + }{ + { + name: "nil", + rs: nil, + projectID: "a", + want: nil, + }, + { + name: "correctly filtered", + rs: &Reservations{ + { + ProjectID: "a", + }, + { + ProjectID: "c", + }, + { + ProjectID: "a", + }, + }, + projectID: "a", + want: Reservations{ + { + ProjectID: "a", + }, + { + ProjectID: "a", + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tt.rs.ForProject(tt.projectID); !reflect.DeepEqual(got, tt.want) { + t.Errorf("Reservations.ForProject() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestReservations_Validate(t *testing.T) { + tests := []struct { + name string + partitions PartitionMap + projects map[string]*mdmv1.Project + rs *Reservations + wantErr error + }{ + { + name: "empty reservations", + partitions: nil, + projects: nil, + rs: nil, + wantErr: nil, + }, + { + name: "invalid amount", + partitions: PartitionMap{ + "a": Partition{}, + "b": Partition{}, + "c": Partition{}, + }, + projects: map[string]*mdmv1.Project{ + "1": {}, + "2": {}, + "3": {}, + }, + rs: &Reservations{ + { + Amount: -3, + Description: "test", + ProjectID: "3", + PartitionIDs: []string{"b"}, + }, + }, + wantErr: fmt.Errorf("amount must be a positive integer"), + }, + { + name: "no partitions referenced", + partitions: PartitionMap{ + "a": Partition{}, + "b": Partition{}, + "c": Partition{}, + }, + projects: map[string]*mdmv1.Project{ + 
"1": {}, + "2": {}, + "3": {}, + }, + rs: &Reservations{ + { + Amount: 3, + Description: "test", + ProjectID: "3", + }, + }, + wantErr: fmt.Errorf("at least one partition id must be specified"), + }, + { + name: "partition does not exist", + partitions: PartitionMap{ + "a": Partition{}, + "b": Partition{}, + "c": Partition{}, + }, + projects: map[string]*mdmv1.Project{ + "1": {}, + "2": {}, + "3": {}, + }, + rs: &Reservations{ + { + Amount: 3, + Description: "test", + ProjectID: "3", + PartitionIDs: []string{"d"}, + }, + }, + wantErr: fmt.Errorf("partition must exist before creating a size reservation"), + }, + { + name: "partition duplicates", + partitions: PartitionMap{ + "a": Partition{}, + "b": Partition{}, + "c": Partition{}, + }, + projects: map[string]*mdmv1.Project{ + "1": {}, + "2": {}, + "3": {}, + }, + rs: &Reservations{ + { + Amount: 3, + Description: "test", + ProjectID: "3", + PartitionIDs: []string{"a", "b", "c", "b"}, + }, + }, + wantErr: fmt.Errorf("partitions must not contain duplicates"), + }, + { + name: "no project referenced", + partitions: PartitionMap{ + "a": Partition{}, + "b": Partition{}, + "c": Partition{}, + }, + projects: map[string]*mdmv1.Project{ + "1": {}, + "2": {}, + "3": {}, + }, + rs: &Reservations{ + { + Amount: 3, + Description: "test", + PartitionIDs: []string{"a"}, + }, + }, + wantErr: fmt.Errorf("project id must be specified"), + }, + { + name: "project does not exist", + partitions: PartitionMap{ + "a": Partition{}, + "b": Partition{}, + "c": Partition{}, + }, + projects: map[string]*mdmv1.Project{ + "1": {}, + "2": {}, + "3": {}, + }, + rs: &Reservations{ + { + Amount: 3, + Description: "test", + ProjectID: "4", + PartitionIDs: []string{"a"}, + }, + }, + wantErr: fmt.Errorf("project must exist before creating a size reservation"), + }, + { + name: "valid reservation", + partitions: PartitionMap{ + "a": Partition{}, + "b": Partition{}, + "c": Partition{}, + }, + projects: map[string]*mdmv1.Project{ + "1": {}, + "2": {}, + "3": {}, + }, + rs: &Reservations{ + { + Amount: 3, + Description: "test", + ProjectID: "2", + PartitionIDs: []string{"b", "c"}, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.rs.Validate(tt.partitions, tt.projects) + if diff := cmp.Diff(tt.wantErr, err, testcommon.ErrorStringComparer()); diff != "" { + t.Errorf("error diff (-want +got):\n%s", diff) + } + }) + } +} diff --git a/cmd/metal-api/internal/service/integration_test.go b/cmd/metal-api/internal/service/integration_test.go index 772c4c864..59fe11020 100644 --- a/cmd/metal-api/internal/service/integration_test.go +++ b/cmd/metal-api/internal/service/integration_test.go @@ -36,6 +36,7 @@ import ( v1 "github.com/metal-stack/metal-api/cmd/metal-api/internal/service/v1" grpcv1 "github.com/metal-stack/metal-api/pkg/api/v1" + testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -75,11 +76,14 @@ func createTestEnvironment(t *testing.T) testEnv { require.NoError(t, err) psc := &mdmv1mock.ProjectServiceClient{} - psc.On("Get", context.Background(), &mdmv1.ProjectGetRequest{Id: "test-project-1"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{ + psc.On("Get", testifymock.Anything, &mdmv1.ProjectGetRequest{Id: "test-project-1"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{ Meta: &mdmv1.Meta{ Id: "test-project-1", }, }}, nil) + psc.On("Find", testifymock.Anything, &mdmv1.ProjectFindRequest{}).Return(&mdmv1.ProjectListResponse{Projects: []*mdmv1.Project{ + {Meta: &mdmv1.Meta{Id: 
"test-project-1"}}, + }}, nil) mdc := mdm.NewMock(psc, nil) log := zaptest.NewLogger(t).Sugar() @@ -104,7 +108,7 @@ func createTestEnvironment(t *testing.T) testEnv { require.NoError(t, err) imageService := NewImage(log, ds) switchService := NewSwitch(log, ds) - sizeService := NewSize(log, ds) + sizeService := NewSize(log, ds, mdc) sizeImageConstraintService := NewSizeImageConstraint(log, ds) networkService := NewNetwork(log, ds, ipamer, mdc) partitionService := NewPartition(log, ds, &emptyPublisher{}) diff --git a/cmd/metal-api/internal/service/ip-service.go b/cmd/metal-api/internal/service/ip-service.go index e74505ae7..cf7a504b1 100644 --- a/cmd/metal-api/internal/service/ip-service.go +++ b/cmd/metal-api/internal/service/ip-service.go @@ -1,7 +1,6 @@ package service import ( - "context" "errors" "fmt" "net/http" @@ -294,7 +293,7 @@ func (r *ipResource) allocateIP(request *restful.Request, response *restful.Resp return } - p, err := r.mdc.Project().Get(context.Background(), &mdmv1.ProjectGetRequest{Id: requestPayload.ProjectID}) + p, err := r.mdc.Project().Get(request.Request.Context(), &mdmv1.ProjectGetRequest{Id: requestPayload.ProjectID}) if err != nil { r.sendError(request, response, defaultError(err)) return diff --git a/cmd/metal-api/internal/service/ip-service_test.go b/cmd/metal-api/internal/service/ip-service_test.go index a1b2bb099..42f2d7ec7 100644 --- a/cmd/metal-api/internal/service/ip-service_test.go +++ b/cmd/metal-api/internal/service/ip-service_test.go @@ -2,7 +2,6 @@ package service import ( "bytes" - "context" "encoding/json" "errors" "net/http" @@ -27,6 +26,7 @@ import ( "github.com/google/go-cmp/cmp" goipam "github.com/metal-stack/go-ipam" + testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" restful "github.com/emicklei/go-restful/v3" @@ -174,7 +174,7 @@ func TestAllocateIP(t *testing.T) { logger := zaptest.NewLogger(t).Sugar() psc := mdmock.ProjectServiceClient{} - psc.On("Get", context.Background(), &mdmv1.ProjectGetRequest{Id: "123"}).Return(&mdmv1.ProjectResponse{ + psc.On("Get", testifymock.Anything, &mdmv1.ProjectGetRequest{Id: "123"}).Return(&mdmv1.ProjectResponse{ Project: &mdmv1.Project{ Meta: &mdmv1.Meta{Id: "project-1"}, }, diff --git a/cmd/metal-api/internal/service/machine-service.go b/cmd/metal-api/internal/service/machine-service.go index 54e9b4821..041d52ee7 100644 --- a/cmd/metal-api/internal/service/machine-service.go +++ b/cmd/metal-api/internal/service/machine-service.go @@ -1317,7 +1317,7 @@ func findWaitingMachine(ds *datastore.RethinkStore, allocationSpec *machineAlloc return nil, fmt.Errorf("partition cannot be found: %w", err) } - machine, err := ds.FindWaitingMachine(allocationSpec.ProjectID, partition.ID, size.ID, allocationSpec.PlacementTags) + machine, err := ds.FindWaitingMachine(allocationSpec.ProjectID, partition.ID, *size, allocationSpec.PlacementTags) if err != nil { return nil, err } @@ -2319,8 +2319,8 @@ func findMachineReferencedEntities(m *metal.Machine, ds *datastore.RethinkStore) var s *metal.Size if m.SizeID != "" { - if m.SizeID == metal.UnknownSize.GetID() { - s = metal.UnknownSize + if m.SizeID == metal.UnknownSize().GetID() { + s = metal.UnknownSize() } else { s, err = ds.FindSize(m.SizeID) if err != nil { diff --git a/cmd/metal-api/internal/service/machine-service_allocation_test.go b/cmd/metal-api/internal/service/machine-service_allocation_test.go index 92b7a8d7f..38d788600 100644 --- a/cmd/metal-api/internal/service/machine-service_allocation_test.go +++ 
b/cmd/metal-api/internal/service/machine-service_allocation_test.go @@ -21,6 +21,7 @@ import ( "github.com/avast/retry-go/v4" "github.com/emicklei/go-restful/v3" + testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" "golang.org/x/sync/errgroup" @@ -303,7 +304,7 @@ func setupTestEnvironment(machineCount int, t *testing.T) (*datastore.RethinkSto require.NoError(t, err) psc := &mdmv1mock.ProjectServiceClient{} - psc.On("Get", context.Background(), &mdmv1.ProjectGetRequest{Id: "pr1"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) + psc.On("Get", testifymock.Anything, &mdmv1.ProjectGetRequest{Id: "pr1"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) mdc := mdm.NewMock(psc, nil) _, pg, err := test.StartPostgres() diff --git a/cmd/metal-api/internal/service/network-service.go b/cmd/metal-api/internal/service/network-service.go index 727a83ee9..0872f05c7 100644 --- a/cmd/metal-api/internal/service/network-service.go +++ b/cmd/metal-api/internal/service/network-service.go @@ -1,7 +1,6 @@ package service import ( - "context" "errors" "fmt" "net/http" @@ -242,7 +241,7 @@ func (r *networkResource) createNetwork(request *restful.Request, response *rest nat := requestPayload.Nat if projectID != "" { - _, err = r.mdc.Project().Get(context.Background(), &mdmv1.ProjectGetRequest{Id: projectID}) + _, err = r.mdc.Project().Get(request.Request.Context(), &mdmv1.ProjectGetRequest{Id: projectID}) if err != nil { r.sendError(request, response, defaultError(err)) return @@ -445,7 +444,7 @@ func (r *networkResource) allocateNetwork(request *restful.Request, response *re return } - project, err := r.mdc.Project().Get(context.Background(), &mdmv1.ProjectGetRequest{Id: projectID}) + project, err := r.mdc.Project().Get(request.Request.Context(), &mdmv1.ProjectGetRequest{Id: projectID}) if err != nil { r.sendError(request, response, defaultError(err)) return diff --git a/cmd/metal-api/internal/service/partition-service.go b/cmd/metal-api/internal/service/partition-service.go index ee1a90173..615dc95b8 100644 --- a/cmd/metal-api/internal/service/partition-service.go +++ b/cmd/metal-api/internal/service/partition-service.go @@ -362,6 +362,11 @@ func (r *partitionResource) calcPartitionCapacity(pcr *v1.PartitionCapacityReque return nil, fmt.Errorf("unable to fetch provisioning event containers: %w", err) } + sizes, err := r.ds.ListSizes() + if err != nil { + return nil, fmt.Errorf("unable to list sizes: %w", err) + } + machinesWithIssues, err := issues.Find(&issues.Config{ Machines: ms, EventContainers: ecs, @@ -371,8 +376,12 @@ func (r *partitionResource) calcPartitionCapacity(pcr *v1.PartitionCapacityReque return nil, fmt.Errorf("unable to calculate machine issues: %w", err) } - partitionsByID := ps.ByID() - ecsByID := ecs.ByID() + var ( + partitionsByID = ps.ByID() + ecsByID = ecs.ByID() + sizesByID = sizes.ByID() + machinesByProject = ms.ByProjectID() + ) for _, m := range ms { m := m @@ -404,15 +413,15 @@ func (r *partitionResource) calcPartitionCapacity(pcr *v1.PartitionCapacityReque } pcs[m.PartitionID] = pc - size := metal.UnknownSize.ID - if m.SizeID != "" { - size = m.SizeID + size, ok := sizesByID[m.SizeID] + if !ok { + size = *metal.UnknownSize() } - cap := pc.ServerCapacities.FindBySize(size) + cap := pc.ServerCapacities.FindBySize(size.ID) if cap == nil { cap = &v1.ServerCapacity{ - Size: size, + Size: size.ID, } pc.ServerCapacities = append(pc.ServerCapacities, cap) } @@ -442,6 +451,20 @@ func (r *partitionResource) 
calcPartitionCapacity(pcr *v1.PartitionCapacityReque res := []v1.PartitionCapacity{} for _, pc := range pcs { pc := pc + + for _, cap := range pc.ServerCapacities { + cap := cap + + size := sizesByID[cap.Size] + + for _, reservation := range size.Reservations.ForPartition(pc.ID) { + reservation := reservation + + cap.Reservations += reservation.Amount + cap.UsedReservations += min(len(machinesByProject[reservation.ProjectID]), reservation.Amount) + } + } + res = append(res, *pc) } diff --git a/cmd/metal-api/internal/service/partition-service_test.go b/cmd/metal-api/internal/service/partition-service_test.go index 0606015ab..30c722c59 100644 --- a/cmd/metal-api/internal/service/partition-service_test.go +++ b/cmd/metal-api/internal/service/partition-service_test.go @@ -288,4 +288,6 @@ func TestPartitionCapacity(t *testing.T) { require.Equal(t, "1", c.Size) require.Equal(t, 5, c.Total) require.Equal(t, 0, c.Free) + require.Equal(t, 3, c.Reservations) + require.Equal(t, 1, c.UsedReservations) } diff --git a/cmd/metal-api/internal/service/project-service.go b/cmd/metal-api/internal/service/project-service.go index fa19a736b..f3725e064 100644 --- a/cmd/metal-api/internal/service/project-service.go +++ b/cmd/metal-api/internal/service/project-service.go @@ -112,7 +112,7 @@ func (r *projectResource) webService() *restful.WebService { func (r *projectResource) findProject(request *restful.Request, response *restful.Response) { id := request.PathParameter("id") - p, err := r.mdc.Project().Get(context.Background(), &mdmv1.ProjectGetRequest{Id: id}) + p, err := r.mdc.Project().Get(request.Request.Context(), &mdmv1.ProjectGetRequest{Id: id}) if err != nil { r.sendError(request, response, defaultError(err)) return @@ -128,7 +128,7 @@ func (r *projectResource) findProject(request *restful.Request, response *restfu } func (r *projectResource) listProjects(request *restful.Request, response *restful.Response) { - res, err := r.mdc.Project().Find(context.Background(), &mdmv1.ProjectFindRequest{}) + res, err := r.mdc.Project().Find(request.Request.Context(), &mdmv1.ProjectFindRequest{}) if err != nil { r.sendError(request, response, defaultError(err)) return @@ -151,7 +151,7 @@ func (r *projectResource) findProjects(request *restful.Request, response *restf return } - res, err := r.mdc.Project().Find(context.Background(), mapper.ToMdmV1ProjectFindRequest(&requestPayload)) + res, err := r.mdc.Project().Find(request.Request.Context(), mapper.ToMdmV1ProjectFindRequest(&requestPayload)) if err != nil { r.sendError(request, response, defaultError(err)) return @@ -185,7 +185,7 @@ func (r *projectResource) createProject(request *restful.Request, response *rest Project: project, } - p, err := r.mdc.Project().Create(context.Background(), mdmv1pcr) + p, err := r.mdc.Project().Create(request.Request.Context(), mdmv1pcr) if err != nil { r.sendError(request, response, defaultError(err)) return @@ -205,7 +205,7 @@ func (r *projectResource) deleteProject(request *restful.Request, response *rest pgr := &mdmv1.ProjectGetRequest{ Id: id, } - p, err := r.mdc.Project().Get(context.Background(), pgr) + p, err := r.mdc.Project().Get(request.Request.Context(), pgr) if err != nil { r.sendError(request, response, defaultError(err)) return @@ -245,10 +245,28 @@ func (r *projectResource) deleteProject(request *restful.Request, response *rest return } + sizes, err := r.ds.ListSizes() + if err != nil { + r.sendError(request, response, defaultError(err)) + return + } + + var sizeReservations metal.Reservations + for _, size := range 
sizes { + size := size + + sizeReservations = append(sizeReservations, size.Reservations.ForProject(id)...) + } + + if len(sizeReservations) > 0 { + r.sendError(request, response, httperrors.BadRequest(errors.New("there are still size reservations made by this project"))) + return + } + pdr := &mdmv1.ProjectDeleteRequest{ Id: p.Project.Meta.Id, } - pdresponse, err := r.mdc.Project().Delete(context.Background(), pdr) + pdresponse, err := r.mdc.Project().Delete(request.Request.Context(), pdr) if err != nil { r.sendError(request, response, defaultError(err)) return @@ -275,7 +293,7 @@ func (r *projectResource) updateProject(request *restful.Request, response *rest return } - existingProject, err := r.mdc.Project().Get(context.Background(), &mdmv1.ProjectGetRequest{Id: requestPayload.Project.Meta.Id}) + existingProject, err := r.mdc.Project().Get(request.Request.Context(), &mdmv1.ProjectGetRequest{Id: requestPayload.Project.Meta.Id}) if err != nil { r.sendError(request, response, defaultError(err)) return @@ -337,3 +355,19 @@ func (r *projectResource) setProjectQuota(project *mdmv1.Project) (*v1.Project, return p, nil } + +func projectsByID(projects []*mdmv1.Project) map[string]*mdmv1.Project { + result := map[string]*mdmv1.Project{} + + for _, p := range projects { + p := p + + if p.Meta == nil { + continue + } + + result[p.GetMeta().GetId()] = p + } + + return result +} diff --git a/cmd/metal-api/internal/service/project-service_test.go b/cmd/metal-api/internal/service/project-service_test.go index 7a4a7fe30..68b96344a 100644 --- a/cmd/metal-api/internal/service/project-service_test.go +++ b/cmd/metal-api/internal/service/project-service_test.go @@ -1,7 +1,6 @@ package service import ( - "context" "errors" "fmt" "testing" @@ -15,6 +14,7 @@ import ( "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" "github.com/metal-stack/metal-lib/httperrors" "github.com/metal-stack/security" + testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" r "gopkg.in/rethinkdb/rethinkdb-go.v6" @@ -100,7 +100,7 @@ func Test_projectResource_findProject(t *testing.T) { userScenarios: []security.User{*testViewUser}, id: "122", projectServiceMock: func(mock *mdmv1mock.ProjectServiceClient) { - mock.On("Get", context.Background(), &mdmv1.ProjectGetRequest{Id: "122"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) + mock.On("Get", testifymock.Anything, &mdmv1.ProjectGetRequest{Id: "122"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) }, wantStatus: 422, wantErr: httperrors.UnprocessableEntity(errors.New("project does not have a projectID")), @@ -109,7 +109,7 @@ func Test_projectResource_findProject(t *testing.T) { name: "entity allowed for user with admin privileges", userScenarios: []security.User{*testAdminUser}, projectServiceMock: func(mock *mdmv1mock.ProjectServiceClient) { - mock.On("Get", context.Background(), &mdmv1.ProjectGetRequest{Id: "123"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) + mock.On("Get", testifymock.Anything, &mdmv1.ProjectGetRequest{Id: "123"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) }, id: "123", wantStatus: 422, @@ -163,7 +163,7 @@ func Test_projectResource_createProject(t *testing.T) { userScenarios: []security.User{*testViewUser}, pcr: &mdmv1.ProjectCreateRequest{Project: &mdmv1.Project{}}, projectServiceMock: func(mock *mdmv1mock.ProjectServiceClient) { - mock.On("Create", context.Background(), &mdmv1.ProjectCreateRequest{Project:
&mdmv1.Project{}}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) + mock.On("Create", testifymock.Anything, &mdmv1.ProjectCreateRequest{Project: &mdmv1.Project{}}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) }, wantStatus: 403, wantErr: httperrors.Forbidden(errors.New("you are not member in one of [k8s_kaas-admin maas-all-all-admin]")), @@ -173,7 +173,7 @@ func Test_projectResource_createProject(t *testing.T) { userScenarios: []security.User{*testAdminUser}, pcr: &mdmv1.ProjectCreateRequest{Project: &mdmv1.Project{}}, projectServiceMock: func(mock *mdmv1mock.ProjectServiceClient) { - mock.On("Create", context.Background(), &mdmv1.ProjectCreateRequest{Project: &mdmv1.Project{}}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) + mock.On("Create", testifymock.Anything, &mdmv1.ProjectCreateRequest{Project: &mdmv1.Project{}}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) }, wantStatus: 400, wantErr: httperrors.BadRequest(errors.New("no tenant given")), @@ -227,7 +227,7 @@ func Test_projectResource_deleteProject(t *testing.T) { userScenarios: []security.User{*testViewUser}, id: "122", projectServiceMock: func(mock *mdmv1mock.ProjectServiceClient) { - mock.On("Delete", context.Background(), &mdmv1.ProjectDeleteRequest{Id: "122"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) + mock.On("Delete", testifymock.Anything, &mdmv1.ProjectDeleteRequest{Id: "122"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) }, wantStatus: 403, wantErr: httperrors.Forbidden(errors.New("you are not member in one of [k8s_kaas-admin maas-all-all-admin]")), @@ -237,13 +237,14 @@ func Test_projectResource_deleteProject(t *testing.T) { userScenarios: []security.User{*testAdminUser}, id: "123", projectServiceMock: func(mock *mdmv1mock.ProjectServiceClient) { - mock.On("Get", context.Background(), &mdmv1.ProjectGetRequest{Id: "123"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{Meta: &mdmv1.Meta{Id: "123"}}}, nil) - mock.On("Delete", context.Background(), &mdmv1.ProjectDeleteRequest{Id: "123"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) + mock.On("Get", testifymock.Anything, &mdmv1.ProjectGetRequest{Id: "123"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{Meta: &mdmv1.Meta{Id: "123"}}}, nil) + mock.On("Delete", testifymock.Anything, &mdmv1.ProjectDeleteRequest{Id: "123"}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) }, dsMock: func(mock *r.Mock) { mock.On(r.DB("mockdb").Table("machine").Filter(r.MockAnything(), r.FilterOpts{})).Return([]metal.Machines{}, nil) mock.On(r.DB("mockdb").Table("network").Filter(r.MockAnything(), r.FilterOpts{})).Return([]metal.Networks{}, nil) mock.On(r.DB("mockdb").Table("ip").Filter(r.MockAnything(), r.FilterOpts{})).Return([]metal.IPs{}, nil) + mock.On(r.DB("mockdb").Table("size")).Return([]metal.Size{}, nil) }, want: &v1.ProjectResponse{}, wantStatus: 200, @@ -297,7 +298,7 @@ func Test_projectResource_updateProject(t *testing.T) { userScenarios: []security.User{*testViewUser}, pur: &mdmv1.ProjectUpdateRequest{Project: &mdmv1.Project{}}, projectServiceMock: func(mock *mdmv1mock.ProjectServiceClient) { - mock.On("Update", context.Background(), &mdmv1.ProjectUpdateRequest{Project: &mdmv1.Project{}}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) + mock.On("Update", testifymock.Anything, &mdmv1.ProjectUpdateRequest{Project: &mdmv1.Project{}}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) }, wantStatus: 403, 
wantErr: httperrors.Forbidden(errors.New("you are not member in one of [k8s_kaas-admin maas-all-all-admin]")), @@ -307,7 +308,7 @@ func Test_projectResource_updateProject(t *testing.T) { userScenarios: []security.User{*testAdminUser}, pur: &mdmv1.ProjectUpdateRequest{Project: &mdmv1.Project{}}, projectServiceMock: func(mock *mdmv1mock.ProjectServiceClient) { - mock.On("Update", context.Background(), &mdmv1.ProjectUpdateRequest{Project: &mdmv1.Project{}}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) + mock.On("Update", testifymock.Anything, &mdmv1.ProjectUpdateRequest{Project: &mdmv1.Project{}}).Return(&mdmv1.ProjectResponse{Project: &mdmv1.Project{}}, nil) }, wantStatus: 400, wantErr: httperrors.BadRequest(errors.New("project and project.meta must be specified")), diff --git a/cmd/metal-api/internal/service/size-service.go b/cmd/metal-api/internal/service/size-service.go index af76f8591..b0a1736d0 100644 --- a/cmd/metal-api/internal/service/size-service.go +++ b/cmd/metal-api/internal/service/size-service.go @@ -5,10 +5,13 @@ import ( "fmt" "net/http" + mdmv1 "github.com/metal-stack/masterdata-api/api/v1" + mdm "github.com/metal-stack/masterdata-api/pkg/client" "github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore" "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" v1 "github.com/metal-stack/metal-api/cmd/metal-api/internal/service/v1" "github.com/metal-stack/metal-lib/auditing" + "github.com/metal-stack/metal-lib/pkg/pointer" "go.uber.org/zap" restfulspec "github.com/emicklei/go-restful-openapi/v2" @@ -18,15 +21,17 @@ import ( type sizeResource struct { webResource + mdc mdm.Client } // NewSize returns a webservice for size specific endpoints. -func NewSize(log *zap.SugaredLogger, ds *datastore.RethinkStore) *restful.WebService { +func NewSize(log *zap.SugaredLogger, ds *datastore.RethinkStore, mdc mdm.Client) *restful.WebService { r := sizeResource{ webResource: webResource{ log: log, ds: ds, }, + mdc: mdc, } return r.webService() } @@ -59,6 +64,16 @@ func (r *sizeResource) webService() *restful.WebService { Returns(http.StatusOK, "OK", []v1.SizeResponse{}). DefaultReturns("Error", httperrors.HTTPErrorResponse{})) + ws.Route(ws.POST("/reservations"). + To(r.listSizeReservations). + Operation("listSizeReservations"). + Doc("get all size reservations"). + Metadata(restfulspec.KeyOpenAPITags, tags). + Metadata(auditing.Exclude, true). + Writes([]v1.SizeReservationResponse{}). + Returns(http.StatusOK, "OK", []v1.SizeReservationResponse{}). + DefaultReturns("Error", httperrors.HTTPErrorResponse{})) + ws.Route(ws.POST("/suggest"). To(r.suggestSize). Operation("suggest"). 
@@ -191,8 +206,8 @@ func (r *sizeResource) createSize(request *restful.Request, response *restful.Re return } - if requestPayload.ID == metal.UnknownSize.GetID() { - r.sendError(request, response, httperrors.BadRequest(fmt.Errorf("id cannot be %q", metal.UnknownSize.GetID()))) + if requestPayload.ID == metal.UnknownSize().GetID() { + r.sendError(request, response, httperrors.BadRequest(fmt.Errorf("id cannot be %q", metal.UnknownSize().GetID()))) return } @@ -204,6 +219,10 @@ func (r *sizeResource) createSize(request *restful.Request, response *restful.Re if requestPayload.Description != nil { description = *requestPayload.Description } + labels := map[string]string{} + if requestPayload.Labels != nil { + labels = requestPayload.Labels + } var constraints []metal.Constraint for _, c := range requestPayload.SizeConstraints { constraint := metal.Constraint{ @@ -213,6 +232,15 @@ func (r *sizeResource) createSize(request *restful.Request, response *restful.Re } constraints = append(constraints, constraint) } + var reservations metal.Reservations + for _, r := range requestPayload.SizeReservations { + reservations = append(reservations, metal.Reservation{ + Amount: r.Amount, + Description: r.Description, + ProjectID: r.ProjectID, + PartitionIDs: r.PartitionIDs, + }) + } s := &metal.Size{ Base: metal.Base{ @@ -220,7 +248,9 @@ func (r *sizeResource) createSize(request *restful.Request, response *restful.Re Name: name, Description: description, }, - Constraints: constraints, + Constraints: constraints, + Reservations: reservations, + Labels: labels, } ss, err := r.ds.ListSizes() @@ -229,7 +259,19 @@ func (r *sizeResource) createSize(request *restful.Request, response *restful.Re return } - err = s.Validate() + ps, err := r.ds.ListPartitions() + if err != nil { + r.sendError(request, response, defaultError(err)) + return + } + + projects, err := r.mdc.Project().Find(request.Request.Context(), &mdmv1.ProjectFindRequest{}) + if err != nil { + r.sendError(request, response, defaultError(err)) + return + } + + err = s.Validate(ps.ByID(), projectsByID(projects.Projects)) if err != nil { r.sendError(request, response, defaultError(err)) return @@ -289,6 +331,9 @@ func (r *sizeResource) updateSize(request *restful.Request, response *restful.Re if requestPayload.Description != nil { newSize.Description = *requestPayload.Description } + if requestPayload.Labels != nil { + newSize.Labels = requestPayload.Labels + } var constraints []metal.Constraint if requestPayload.SizeConstraints != nil { sizeConstraints := *requestPayload.SizeConstraints @@ -302,6 +347,18 @@ func (r *sizeResource) updateSize(request *restful.Request, response *restful.Re } newSize.Constraints = constraints } + var reservations metal.Reservations + if requestPayload.SizeReservations != nil { + for _, r := range requestPayload.SizeReservations { + reservations = append(reservations, metal.Reservation{ + Amount: r.Amount, + Description: r.Description, + ProjectID: r.ProjectID, + PartitionIDs: r.PartitionIDs, + }) + } + newSize.Reservations = reservations + } ss, err := r.ds.ListSizes() if err != nil { @@ -309,7 +366,19 @@ func (r *sizeResource) updateSize(request *restful.Request, response *restful.Re return } - err = newSize.Validate() + ps, err := r.ds.ListPartitions() + if err != nil { + r.sendError(request, response, defaultError(err)) + return + } + + projects, err := r.mdc.Project().Find(request.Request.Context(), &mdmv1.ProjectFindRequest{}) + if err != nil { + r.sendError(request, response, defaultError(err)) + return + } + + err = 
newSize.Validate(ps.ByID(), projectsByID(projects.Projects)) if err != nil { r.sendError(request, response, defaultError(err)) return @@ -351,3 +420,55 @@ func (r *sizeResource) fromHardware(request *restful.Request, response *restful. r.send(request, response, http.StatusOK, v1.NewSizeMatchingLog(lg[0])) } + +func (r *sizeResource) listSizeReservations(request *restful.Request, response *restful.Response) { + ss, err := r.ds.ListSizes() + if err != nil { + r.sendError(request, response, defaultError(err)) + return + } + + projects, err := r.mdc.Project().Find(request.Request.Context(), &mdmv1.ProjectFindRequest{}) + if err != nil { + r.sendError(request, response, defaultError(err)) + return + } + + ms, err := r.ds.ListMachines() + if err != nil { + r.sendError(request, response, defaultError(err)) + return + } + + var ( + result []*v1.SizeReservationResponse + projectsByID = projectsByID(projects.Projects) + machinesByProjectID = ms.ByProjectID() + ) + + for _, size := range ss { + size := size + + for _, reservation := range size.Reservations { + reservation := reservation + + for _, partitionID := range reservation.PartitionIDs { + project := pointer.SafeDeref(projectsByID[reservation.ProjectID]) + allocations := len(machinesByProjectID[reservation.ProjectID].WithSize(size.ID)) + + result = append(result, &v1.SizeReservationResponse{ + SizeID: size.ID, + PartitionID: partitionID, + Tenant: project.TenantId, + ProjectID: reservation.ProjectID, + ProjectName: project.Name, + Reservations: reservation.Amount, + UsedReservations: min(reservation.Amount, allocations), + ProjectAllocations: allocations, + }) + } + } + } + + r.send(request, response, http.StatusOK, result) +} diff --git a/cmd/metal-api/internal/service/size-service_test.go b/cmd/metal-api/internal/service/size-service_test.go index 9e0a02345..09ca22100 100644 --- a/cmd/metal-api/internal/service/size-service_test.go +++ b/cmd/metal-api/internal/service/size-service_test.go @@ -7,12 +7,17 @@ import ( "net/http/httptest" "testing" + "github.com/google/go-cmp/cmp" + mdmv1 "github.com/metal-stack/masterdata-api/api/v1" + mdmv1mock "github.com/metal-stack/masterdata-api/api/v1/mocks" + mdm "github.com/metal-stack/masterdata-api/pkg/client" "github.com/metal-stack/metal-api/cmd/metal-api/internal/datastore" "github.com/metal-stack/metal-api/cmd/metal-api/internal/metal" v1 "github.com/metal-stack/metal-api/cmd/metal-api/internal/service/v1" "github.com/metal-stack/metal-api/cmd/metal-api/internal/testdata" "github.com/metal-stack/metal-lib/httperrors" "github.com/stretchr/testify/assert" + testifymock "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "go.uber.org/zap/zaptest" @@ -23,7 +28,7 @@ func TestGetSizes(t *testing.T) { ds, mock := datastore.InitMockDB(t) testdata.InitMockDBData(mock) - sizeservice := NewSize(zaptest.NewLogger(t).Sugar(), ds) + sizeservice := NewSize(zaptest.NewLogger(t).Sugar(), ds, nil) container := restful.NewContainer().Add(sizeservice) req := httptest.NewRequest("GET", "/v1/size", nil) w := httptest.NewRecorder() @@ -52,7 +57,7 @@ func TestGetSize(t *testing.T) { ds, mock := datastore.InitMockDB(t) testdata.InitMockDBData(mock) - sizeservice := NewSize(zaptest.NewLogger(t).Sugar(), ds) + sizeservice := NewSize(zaptest.NewLogger(t).Sugar(), ds, nil) container := restful.NewContainer().Add(sizeservice) req := httptest.NewRequest("GET", "/v1/size/1", nil) w := httptest.NewRecorder() @@ -82,7 +87,7 @@ func TestSuggest(t *testing.T) { require.NoError(t, err) body := bytes.NewBuffer(js) 
- sizeservice := NewSize(zaptest.NewLogger(t).Sugar(), ds) + sizeservice := NewSize(zaptest.NewLogger(t).Sugar(), ds, nil) container := restful.NewContainer().Add(sizeservice) req := httptest.NewRequest("POST", "/v1/size/suggest", body) req.Header.Add("Content-Type", "application/json") @@ -122,7 +127,7 @@ func TestGetSizeNotFound(t *testing.T) { ds, mock := datastore.InitMockDB(t) testdata.InitMockDBData(mock) - sizeservice := NewSize(zaptest.NewLogger(t).Sugar(), ds) + sizeservice := NewSize(zaptest.NewLogger(t).Sugar(), ds, nil) container := restful.NewContainer().Add(sizeservice) req := httptest.NewRequest("GET", "/v1/size/999", nil) w := httptest.NewRecorder() @@ -144,7 +149,7 @@ func TestDeleteSize(t *testing.T) { testdata.InitMockDBData(mock) log := zaptest.NewLogger(t).Sugar() - sizeservice := NewSize(log, ds) + sizeservice := NewSize(log, ds, nil) container := restful.NewContainer().Add(sizeservice) req := httptest.NewRequest("DELETE", "/v1/size/1", nil) container = injectAdmin(log, container, req) @@ -168,7 +173,13 @@ func TestCreateSize(t *testing.T) { testdata.InitMockDBData(mock) log := zaptest.NewLogger(t).Sugar() - sizeservice := NewSize(log, ds) + psc := &mdmv1mock.ProjectServiceClient{} + psc.On("Find", testifymock.Anything, &mdmv1.ProjectFindRequest{}).Return(&mdmv1.ProjectListResponse{Projects: []*mdmv1.Project{ + {Meta: &mdmv1.Meta{Id: "a"}}, + }}, nil) + mdc := mdm.NewMock(psc, &mdmv1mock.TenantServiceClient{}) + + sizeservice := NewSize(log, ds, mdc) container := restful.NewContainer().Add(sizeservice) createRequest := v1.SizeCreateRequest{ @@ -193,6 +204,14 @@ func TestCreateSize(t *testing.T) { Max: 100, }, }, + SizeReservations: []v1.SizeReservation{ + { + Amount: 3, + ProjectID: "a", + PartitionIDs: []string{testdata.Partition1.ID}, + Description: "test", + }, + }, } js, err := json.Marshal(createRequest) require.NoError(t, err) @@ -220,7 +239,13 @@ func TestUpdateSize(t *testing.T) { testdata.InitMockDBData(mock) log := zaptest.NewLogger(t).Sugar() - sizeservice := NewSize(log, ds) + psc := &mdmv1mock.ProjectServiceClient{} + psc.On("Find", testifymock.Anything, &mdmv1.ProjectFindRequest{}).Return(&mdmv1.ProjectListResponse{Projects: []*mdmv1.Project{ + {Meta: &mdmv1.Meta{Id: "p1"}}, + }}, nil) + mdc := mdm.NewMock(psc, &mdmv1mock.TenantServiceClient{}) + + sizeservice := NewSize(log, ds, mdc) container := restful.NewContainer().Add(sizeservice) minCores := uint64(8) @@ -266,3 +291,46 @@ func TestUpdateSize(t *testing.T) { require.Equal(t, minCores, result.SizeConstraints[0].Min) require.Equal(t, maxCores, result.SizeConstraints[0].Max) } + +func TestListSizeReservations(t *testing.T) { + ds, mock := datastore.InitMockDB(t) + testdata.InitMockDBData(mock) + log := zaptest.NewLogger(t).Sugar() + + psc := &mdmv1mock.ProjectServiceClient{} + psc.On("Find", testifymock.Anything, &mdmv1.ProjectFindRequest{}).Return(&mdmv1.ProjectListResponse{Projects: []*mdmv1.Project{ + {Meta: &mdmv1.Meta{Id: "p1"}}, + }}, nil) + mdc := mdm.NewMock(psc, &mdmv1mock.TenantServiceClient{}) + + sizeservice := NewSize(log, ds, mdc) + container := restful.NewContainer().Add(sizeservice) + + req := httptest.NewRequest("POST", "/v1/size/reservations", nil) + req.Header.Add("Content-Type", "application/json") + container = injectAdmin(log, container, req) + w := httptest.NewRecorder() + container.ServeHTTP(w, req) + + resp := w.Result() + defer resp.Body.Close() + require.Equal(t, http.StatusOK, resp.StatusCode, w.Body.String()) + var result []*v1.SizeReservationResponse + err := 
json.NewDecoder(resp.Body).Decode(&result) + require.NoError(t, err) + + want := []*v1.SizeReservationResponse{ + { + SizeID: testdata.Sz1.ID, + PartitionID: "1", + ProjectID: "p1", + Reservations: 3, + UsedReservations: 1, + ProjectAllocations: 1, + }, + } + + if diff := cmp.Diff(want, result); diff != "" { + t.Errorf("diff (-want +got):\n%s", diff) + } +} diff --git a/cmd/metal-api/internal/service/v1/partition.go b/cmd/metal-api/internal/service/v1/partition.go index 522075f31..7bea44237 100644 --- a/cmd/metal-api/internal/service/v1/partition.go +++ b/cmd/metal-api/internal/service/v1/partition.go @@ -47,14 +47,16 @@ type PartitionCapacity struct { } type ServerCapacity struct { - Size string `json:"size" description:"the size of the server"` - Total int `json:"total" description:"total amount of servers with this size"` - Free int `json:"free" description:"free servers with this size"` - Allocated int `json:"allocated" description:"allocated servers with this size"` - Faulty int `json:"faulty" description:"servers with issues with this size"` - FaultyMachines []string `json:"faultymachines" description:"servers with issues with this size"` - Other int `json:"other" description:"servers neither free, allocated or faulty with this size"` - OtherMachines []string `json:"othermachines" description:"servers neither free, allocated or faulty with this size"` + Size string `json:"size" description:"the size of the server"` + Total int `json:"total" description:"total amount of servers with this size"` + Free int `json:"free" description:"free servers with this size"` + Allocated int `json:"allocated" description:"allocated servers with this size"` + Reservations int `json:"reservations" description:"the amount of reservations for this size"` + UsedReservations int `json:"usedreservations" description:"the amount of used reservations for this size"` + Faulty int `json:"faulty" description:"servers with issues with this size"` + FaultyMachines []string `json:"faultymachines" description:"servers with issues with this size"` + Other int `json:"other" description:"servers neither free, allocated or faulty with this size"` + OtherMachines []string `json:"othermachines" description:"servers neither free, allocated or faulty with this size"` } func NewPartitionResponse(p *metal.Partition) *PartitionResponse { diff --git a/cmd/metal-api/internal/service/v1/size.go b/cmd/metal-api/internal/service/v1/size.go index 9c875d778..6aa6f484e 100644 --- a/cmd/metal-api/internal/service/v1/size.go +++ b/cmd/metal-api/internal/service/v1/size.go @@ -10,22 +10,46 @@ type SizeConstraint struct { Max uint64 `json:"max" description:"the maximum value of the constraint"` } +type SizeReservation struct { + Amount int `json:"amount" description:"the amount of reserved machine allocations for this size"` + Description string `json:"description,omitempty" description:"a description for this reservation"` + ProjectID string `json:"projectid" description:"the project for which this size reservation is considered"` + PartitionIDs []string `json:"partitionids" description:"the partitions in which this size reservation is considered, the amount is valid for every partition"` +} + type SizeCreateRequest struct { Common - SizeConstraints []SizeConstraint `json:"constraints" description:"a list of constraints that defines this size"` + SizeConstraints []SizeConstraint `json:"constraints" description:"a list of constraints that defines this size"` + SizeReservations []SizeReservation `json:"reservations,omitempty"
description:"reservations for this size, which are considered during machine allocation" optional:"true"` + Labels map[string]string `json:"labels" description:"free labels that you associate with this network." optional:"true"` } type SizeUpdateRequest struct { Common - SizeConstraints *[]SizeConstraint `json:"constraints" description:"a list of constraints that defines this size" optional:"true"` + SizeConstraints *[]SizeConstraint `json:"constraints" description:"a list of constraints that defines this size" optional:"true"` + SizeReservations []SizeReservation `json:"reservations,omitempty" description:"reservations for this size, which are considered during machine allocation" optional:"true"` + Labels map[string]string `json:"labels" description:"free labels that you associate with this network." optional:"true"` } type SizeResponse struct { Common - SizeConstraints []SizeConstraint `json:"constraints" description:"a list of constraints that defines this size"` + SizeConstraints []SizeConstraint `json:"constraints" description:"a list of constraints that defines this size"` + SizeReservations []SizeReservation `json:"reservations,omitempty" description:"reservations for this size, which are considered during machine allocation" optional:"true"` + Labels map[string]string `json:"labels" description:"free labels that you associate with this network."` Timestamps } +type SizeReservationResponse struct { + SizeID string `json:"sizeid" description:"the size id of this size reservation"` + PartitionID string `json:"partitionid" description:"the partition id of this size reservation"` + Tenant string `json:"tenant" description:"the tenant of this size reservation"` + ProjectID string `json:"projectid" description:"the project id of this size reservation"` + ProjectName string `json:"projectname" description:"the project name of this size reservation"` + Reservations int `json:"reservations" description:"the amount of reservations of this size reservation"` + UsedReservations int `json:"usedreservations" description:"the used amount of reservations of this size reservation"` + ProjectAllocations int `json:"projectallocations" description:"the amount of allocations of this project referenced by this size reservation"` +} + type SizeSuggestRequest struct { MachineID string `json:"machineID" description:"machineID to retrieve size suggestion for"` } @@ -80,6 +104,17 @@ func NewSizeResponse(s *metal.Size) *SizeResponse { constraints = append(constraints, constraint) } + reservations := []SizeReservation{} + for _, r := range s.Reservations { + reservation := SizeReservation{ + Amount: r.Amount, + Description: r.Description, + ProjectID: r.ProjectID, + PartitionIDs: r.PartitionIDs, + } + reservations = append(reservations, reservation) + } + return &SizeResponse{ Common: Common{ Identifiable: Identifiable{ @@ -90,10 +125,12 @@ func NewSizeResponse(s *metal.Size) *SizeResponse { Description: &s.Description, }, }, - SizeConstraints: constraints, + SizeReservations: reservations, + SizeConstraints: constraints, Timestamps: Timestamps{ Created: s.Created, Changed: s.Changed, }, + Labels: s.Labels, } } diff --git a/cmd/metal-api/internal/testdata/testdata.go b/cmd/metal-api/internal/testdata/testdata.go index 3ca14f7d0..ae90776bc 100644 --- a/cmd/metal-api/internal/testdata/testdata.go +++ b/cmd/metal-api/internal/testdata/testdata.go @@ -194,6 +194,13 @@ var ( Max: 100, }, }, + Reservations: metal.Reservations{ + { + Amount: 3, + PartitionIDs: []string{Partition1.ID}, + ProjectID: "p1", + }, + }, } 
diff --git a/cmd/metal-api/main.go b/cmd/metal-api/main.go
index 92a9c455a..f3780c22b 100644
--- a/cmd/metal-api/main.go
+++ b/cmd/metal-api/main.go
@@ -752,7 +752,7 @@ func initRestServices(audit auditing.Auditing, withauth bool, ipmiSuperUser meta
 	restful.DefaultContainer.Add(service.NewAudit(logger.Named("audit-service"), audit))
 	restful.DefaultContainer.Add(service.NewPartition(logger.Named("partition-service"), ds, nsqer))
 	restful.DefaultContainer.Add(service.NewImage(logger.Named("image-service"), ds))
-	restful.DefaultContainer.Add(service.NewSize(logger.Named("size-service"), ds))
+	restful.DefaultContainer.Add(service.NewSize(logger.Named("size-service"), ds, mdc))
 	restful.DefaultContainer.Add(service.NewSizeImageConstraint(logger.Named("size-image-constraint-service"), ds))
 	restful.DefaultContainer.Add(service.NewNetwork(logger.Named("network-service"), ds, ipamer, mdc))
 	restful.DefaultContainer.Add(ipService)
diff --git a/spec/metal-api.json b/spec/metal-api.json
index c4320ad98..eba6d34a2 100644
--- a/spec/metal-api.json
+++ b/spec/metal-api.json
@@ -4248,6 +4248,11 @@
       },
       "type": "array"
     },
+    "reservations": {
+      "description": "the amount of reservations for this size",
+      "format": "int32",
+      "type": "integer"
+    },
     "size": {
       "description": "the size of the server",
       "type": "string"
@@ -4256,6 +4261,11 @@
       "description": "total amount of servers with this size",
       "format": "int32",
       "type": "integer"
+    },
+    "usedreservations": {
+      "description": "the amount of used reservations for this size",
+      "format": "int32",
+      "type": "integer"
     }
   },
   "required": [
@@ -4265,8 +4275,10 @@
     "free",
     "other",
     "othermachines",
+    "reservations",
     "size",
-    "total"
+    "total",
+    "usedreservations"
   ]
 },
 "v1.SizeConstraint": {
@@ -4337,9 +4349,23 @@
       "type": "string",
       "uniqueItems": true
     },
+    "labels": {
+      "additionalProperties": {
+        "type": "string"
+      },
+      "description": "free labels that you associate with this size.",
+      "type": "object"
+    },
     "name": {
       "description": "a readable name for this entity",
       "type": "string"
+    },
+    "reservations": {
+      "description": "reservations for this size, which are considered during machine allocation",
+      "items": {
+        "$ref": "#/definitions/v1.SizeReservation"
+      },
+      "type": "array"
     }
   },
   "required": [
@@ -4473,6 +4499,84 @@
     "name"
   ]
 },
+"v1.SizeReservation": {
+  "properties": {
+    "amount": {
+      "description": "the amount of reserved machine allocations for this size",
+      "format": "int32",
+      "type": "integer"
+    },
+    "description": {
+      "description": "a description for this reservation",
+      "type": "string"
+    },
+    "partitionids": {
+      "description": "the partitions in which this size reservation is considered, the amount is valid for every partition",
+      "items": {
+        "type": "string"
+      },
+      "type": "array"
+    },
+    "projectid": {
+      "description": "the project for which this size reservation is considered",
+      "type": "string"
+    }
+  },
+  "required": [
+    "amount",
+    "partitionids",
+    "projectid"
+  ]
+},
{ + "description": "the amount of reservations of this size reservation", + "format": "int32", + "type": "integer" + }, + "sizeid": { + "description": "the size id of this size reservation", + "type": "string" + }, + "tenant": { + "description": "the tenant of this size reservation", + "type": "string" + }, + "usedreservations": { + "description": "the used amount of reservations of this size reservation", + "format": "int32", + "type": "integer" + } + }, + "required": [ + "partitionid", + "projectallocations", + "projectid", + "projectname", + "reservations", + "sizeid", + "tenant", + "usedreservations" + ] + }, "v1.SizeResponse": { "properties": { "changed": { @@ -4503,14 +4607,29 @@ "type": "string", "uniqueItems": true }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "free labels that you associate with this network.", + "type": "object" + }, "name": { "description": "a readable name for this entity", "type": "string" + }, + "reservations": { + "description": "reservations for this size, which are considered during machine allocation", + "items": { + "$ref": "#/definitions/v1.SizeReservation" + }, + "type": "array" } }, "required": [ "constraints", - "id" + "id", + "labels" ] }, "v1.SizeSuggestRequest": { @@ -4542,9 +4661,23 @@ "type": "string", "uniqueItems": true }, + "labels": { + "additionalProperties": { + "type": "string" + }, + "description": "free labels that you associate with this network.", + "type": "object" + }, "name": { "description": "a readable name for this entity", "type": "string" + }, + "reservations": { + "description": "reservations for this size, which are considered during machine allocation", + "items": { + "$ref": "#/definitions/v1.SizeReservation" + }, + "type": "array" } }, "required": [ @@ -8800,6 +8933,38 @@ ] } }, + "/v1/size/reservations": { + "post": { + "consumes": [ + "application/json" + ], + "operationId": "listSizeReservations", + "produces": [ + "application/json" + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "items": { + "$ref": "#/definitions/v1.SizeReservationResponse" + }, + "type": "array" + } + }, + "default": { + "description": "Error", + "schema": { + "$ref": "#/definitions/httperrors.HTTPErrorResponse" + } + } + }, + "summary": "get all size reservations", + "tags": [ + "size" + ] + } + }, "/v1/size/suggest": { "post": { "consumes": [