diff --git a/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml b/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml index d7917d10..09716e14 100644 --- a/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml +++ b/helm/bundles/cortex-nova/templates/pipelines_kvm.yaml @@ -16,7 +16,17 @@ spec: type: filter-weigher createDecisions: true filters: [] - weighers: [] + weighers: + - name: kvm_binpack + multiplier: -1.0 # inverted = balancing + params: + - {key: resourceWeights, floatMapValue: {"memory": 1.0}} + description: | + This step implements a balancing weigher for workloads on kvm hypervisors, + which is the opposite of binpacking. Instead of pulling the requested vm + into the smallest gaps possible, it spreads the load to ensure + workloads are balanced across hosts. In this pipeline, the balancing will + focus on general purpose virtual machines. --- apiVersion: cortex.cloud/v1alpha1 kind: Pipeline @@ -34,7 +44,15 @@ spec: type: filter-weigher createDecisions: true filters: [] - weighers: [] + weighers: + - name: kvm_binpack + params: + - {key: resourceWeights, floatMapValue: {"memory": 1.0}} + description: | + This step implements a binpacking weigher for workloads on kvm hypervisors. + It pulls the requested vm into the smallest gaps possible, to ensure + other hosts with less allocation stay free for bigger vms. + In this pipeline, the binpacking will focus on hana virtual machines. --- apiVersion: cortex.cloud/v1alpha1 kind: Pipeline @@ -136,7 +154,17 @@ spec: This step filters hosts based on the `requested_destination` instruction from the nova scheduler request spec. It supports filtering by host and by aggregates. - weighers: [] + weighers: + - name: kvm_binpack + multiplier: -1.0 # inverted = balancing + params: + - {key: resourceWeights, floatMapValue: {"memory": 1.0}} + description: | + This step implements a balancing weigher for workloads on kvm hypervisors, + which is the opposite of binpacking. 
Instead of pulling the requested vm + into the smallest gaps possible, it spreads the load to ensure + workloads are balanced across hosts. In this pipeline, the balancing will + focus on general purpose virtual machines. --- apiVersion: cortex.cloud/v1alpha1 kind: Pipeline @@ -238,7 +266,15 @@ spec: This step filters hosts based on the `requested_destination` instruction from the nova scheduler request spec. It supports filtering by host and by aggregates. - weighers: [] + weighers: + - name: kvm_binpack + params: + - {key: resourceWeights, floatMapValue: {"memory": 1.0}} + description: | + This step implements a binpacking weigher for workloads on kvm hypervisors. + It pulls the requested vm into the smallest gaps possible, to ensure + other hosts with less allocation stay free for bigger vms. + In this pipeline, the binpacking will focus on hana virtual machines. --- apiVersion: cortex.cloud/v1alpha1 kind: Pipeline diff --git a/internal/scheduling/lib/filter_weigher_pipeline.go b/internal/scheduling/lib/filter_weigher_pipeline.go index 188d7b65..e28b7d55 100644 --- a/internal/scheduling/lib/filter_weigher_pipeline.go +++ b/internal/scheduling/lib/filter_weigher_pipeline.go @@ -209,6 +209,7 @@ func (p *filterWeigherPipeline[RequestType]) normalizeInputWeights(weights map[s // Apply the step weights to the input weights. func (p *filterWeigherPipeline[RequestType]) applyWeights( + traceLog *slog.Logger, stepWeights map[string]map[string]float64, inWeights map[string]float64, ) map[string]float64 { @@ -227,6 +228,16 @@ func (p *filterWeigherPipeline[RequestType]) applyWeights( if !ok { multiplier = 1.0 } + // This logging will help us validate the weigher multipliers are configured + // and applied correctly, as well as debug any issues with the weighers outputs. 
+ if multiplier == 0 { + traceLog.Info("weigher multiplier is zero, won't have any effect", + "weigher", weigherName, "multiplier", multiplier) + } + if multiplier < 0 { + traceLog.Info("weigher multiplier is negative, inverting weigher behavior", + "weigher", weigherName, "multiplier", multiplier) + } outWeights = p.Apply(outWeights, weigherActivations, multiplier) } return outWeights @@ -272,7 +283,7 @@ func (p *filterWeigherPipeline[RequestType]) Run(request RequestType) (v1alpha1. remainingWeights[host] = inWeights[host] } stepWeights := p.runWeighers(traceLog, filteredRequest) - outWeights := p.applyWeights(stepWeights, remainingWeights) + outWeights := p.applyWeights(traceLog, stepWeights, remainingWeights) traceLog.Info("scheduler: output weights", "weights", outWeights) hosts := p.sortHostsByWeights(outWeights) diff --git a/internal/scheduling/lib/filter_weigher_pipeline_test.go b/internal/scheduling/lib/filter_weigher_pipeline_test.go index 3df01854..6e92d7eb 100644 --- a/internal/scheduling/lib/filter_weigher_pipeline_test.go +++ b/internal/scheduling/lib/filter_weigher_pipeline_test.go @@ -154,7 +154,7 @@ func TestPipeline_ApplyStepWeights(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - result := p.applyWeights(tt.stepWeights, tt.inWeights) + result := p.applyWeights(slog.Default(), tt.stepWeights, tt.inWeights) for host, weight := range tt.expectedResult { if result[host] != weight { t.Errorf("expected weight %f for host %s, got %f", weight, host, result[host]) diff --git a/internal/scheduling/nova/plugins/weighers/kvm_binpack.go b/internal/scheduling/nova/plugins/weighers/kvm_binpack.go new file mode 100644 index 00000000..9ad2de02 --- /dev/null +++ b/internal/scheduling/nova/plugins/weighers/kvm_binpack.go @@ -0,0 +1,157 @@ +// Copyright SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package weighers + +import ( + "context" + "errors" + "fmt" + "log/slog" + "slices" + + api 
"github.com/cobaltcore-dev/cortex/api/external/nova" + "github.com/cobaltcore-dev/cortex/internal/scheduling/lib" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" +) + +type KVMBinpackStepOpts struct { + // ResourceWeights allows configuring the weight for each resource type when + // calculating the binpacking score. The score is a weighted average of the + // node's resource utilizations after placing the VM. + // If a resource is not specified, is ignored in the score calculation + // (equivalent to a weight of 0). + ResourceWeights map[corev1.ResourceName]float64 `json:"resourceWeights"` +} + +// Validate the options to ensure they are correct before running the weigher. +func (o KVMBinpackStepOpts) Validate() error { + if len(o.ResourceWeights) == 0 { + return errors.New("at least one resource weight must be specified") + } + supportedResources := []corev1.ResourceName{ + corev1.ResourceMemory, + corev1.ResourceCPU, + } + for resourceName, value := range o.ResourceWeights { + if !slices.Contains(supportedResources, resourceName) { + return fmt.Errorf( + "unsupported resource %s in ResourceWeights, supported resources are: %v", + resourceName, supportedResources, + ) + } + // Value == 0 means the weight shouldn't be provided or the weigher + // disabled in general. + if value == 0 { + return fmt.Errorf("resource weight for %s can't be zero, if you want to "+ + "disable this resource in the weigher, remove it or the weigher", resourceName) + } + // Value < 0 doesn't work since the division of the + // weighted sum by the total weight will turn the score positive again, + // which is likely not what the user intended when setting a negative + // weight to invert the weigher's behavior. + if value < 0 { + return fmt.Errorf("resource weight for %s can't be negative. 
"+ + "use weigher.multiplier to invert this weighers behavior", resourceName) + } + } + return nil +} + +// This step implements a binpacking weigher for workloads on kvm hypervisors. +// It pulls the requested vm into the smallest gaps possible, to ensure +// other hosts with less allocation stay free for bigger vms. +// Explanation of the algorithm: https://volcano.sh/en/docs/plugins/#binpack +type KVMBinpackStep struct { + // Base weigher providing common functionality. + lib.BaseWeigher[api.ExternalSchedulerRequest, KVMBinpackStepOpts] +} + +// Run this weigher in the pipeline after filters have been executed. +func (s *KVMBinpackStep) Run(traceLog *slog.Logger, request api.ExternalSchedulerRequest) (*lib.FilterWeigherPipelineStepResult, error) { + result := s.IncludeAllHostsFromRequest(request) + result.Statistics["binpack score"] = s.PrepareStats(request, "float") + + hvs := &hv1.HypervisorList{} + if err := s.Client.List(context.Background(), hvs); err != nil { + traceLog.Error("failed to list hypervisors", "error", err) + return nil, err + } + hvsByName := make(map[string]hv1.Hypervisor, len(hvs.Items)) + for _, hv := range hvs.Items { + hvsByName[hv.Name] = hv + } + vmResources := s.calcVMResources(request) + + for host := range result.Activations { + hv, ok := hvsByName[host] + if !ok { + traceLog.Warn("no hv for host, skipping", "host", host) + continue + } + var totalWeightedUtilization, totalWeight float64 + + for resourceName, weight := range s.Options.ResourceWeights { + allocation, ok := hv.Status.Allocation[resourceName.String()] + if !ok { + traceLog.Warn("no allocation in status, skipping", + "host", host, "resource", resourceName) + continue + } + capacity, ok := hv.Status.Capacity[resourceName.String()] + if !ok { + traceLog.Warn("no capacity in status, skipping", + "host", host, "resource", resourceName) + continue + } + if capacity.IsZero() { + traceLog.Warn("capacity is zero, skipping", + "host", host, "resource", resourceName) + continue + 
} + used := capacity.DeepCopy() + used.Sub(allocation) + vmReq, ok := vmResources[resourceName] + if !ok { + traceLog.Warn("no resource request for vm, skipping", + "resource", resourceName) + continue + } + used.Add(vmReq) + utilization := used.AsApproximateFloat64() / capacity.AsApproximateFloat64() + totalWeightedUtilization += utilization * weight + totalWeight += weight + } + + var score float64 + if totalWeight != 0 { + score = totalWeightedUtilization / totalWeight // This can be > 1.0 + } + result.Activations[host] = score + result.Statistics["binpack score"].Hosts[host] = score + traceLog.Info("calculated binpack score for host", + "host", host, "score", score) + } + + return result, nil +} + +// calcVMResources calculates the total resource requests for the VM to be scheduled. +func (s *KVMBinpackStep) calcVMResources(req api.ExternalSchedulerRequest) map[corev1.ResourceName]resource.Quantity { + resources := make(map[corev1.ResourceName]resource.Quantity) + resourcesMemBytes := int64(req.Spec.Data.Flavor.Data.MemoryMB * 1_000_000) //nolint:gosec // memory values are bounded by Nova + resourcesMemBytes *= int64(req.Spec.Data.NumInstances) //nolint:gosec // instance count is bounded by Nova + resources[corev1.ResourceMemory] = *resource. + NewQuantity(resourcesMemBytes, resource.DecimalSI) + resourcesCPU := int64(req.Spec.Data.Flavor.Data.VCPUs) //nolint:gosec // vCPU values are bounded by Nova + resourcesCPU *= int64(req.Spec.Data.NumInstances) //nolint:gosec // instance count is bounded by Nova + resources[corev1.ResourceCPU] = *resource. 
+ NewQuantity(resourcesCPU, resource.DecimalSI) + return resources +} + +func init() { + Index["kvm_binpack"] = func() NovaWeigher { return &KVMBinpackStep{} } +} diff --git a/internal/scheduling/nova/plugins/weighers/kvm_binpack_test.go b/internal/scheduling/nova/plugins/weighers/kvm_binpack_test.go new file mode 100644 index 00000000..de66f29a --- /dev/null +++ b/internal/scheduling/nova/plugins/weighers/kvm_binpack_test.go @@ -0,0 +1,581 @@ +// Copyright SAP SE +// SPDX-License-Identifier: Apache-2.0 + +package weighers + +import ( + "log/slog" + "strings" + "testing" + + api "github.com/cobaltcore-dev/cortex/api/external/nova" + hv1 "github.com/cobaltcore-dev/openstack-hypervisor-operator/api/v1" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" +) + +func newHypervisor(name, capacityCPU, capacityMem, allocationCPU, allocationMem string) *hv1.Hypervisor { + return &hv1.Hypervisor{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + }, + Status: hv1.HypervisorStatus{ + Capacity: map[string]resource.Quantity{ + "cpu": resource.MustParse(capacityCPU), + "memory": resource.MustParse(capacityMem), + }, + Allocation: map[string]resource.Quantity{ + "cpu": resource.MustParse(allocationCPU), + "memory": resource.MustParse(allocationMem), + }, + }, + } +} + +func newBinpackRequest(memoryMB, vcpus, numInstances uint64, hosts []string) api.ExternalSchedulerRequest { + hostList := make([]api.ExternalSchedulerHost, len(hosts)) + for i, h := range hosts { + hostList[i] = api.ExternalSchedulerHost{ComputeHost: h} + } + + extraSpecs := map[string]string{ + "capabilities:hypervisor_type": "qemu", + } + + spec := api.NovaSpec{ + ProjectID: "project-A", + InstanceUUID: "instance-123", + NumInstances: numInstances, + Flavor: api.NovaObject[api.NovaFlavor]{ + Data: api.NovaFlavor{ + Name: "m1.large", + VCPUs: vcpus, + 
MemoryMB: memoryMB, + ExtraSpecs: extraSpecs, + }, + }, + } + + weights := make(map[string]float64) + for _, h := range hosts { + weights[h] = 1.0 + } + + return api.ExternalSchedulerRequest{ + Spec: api.NovaObject[api.NovaSpec]{Data: spec}, + Hosts: hostList, + Weights: weights, + } +} + +func TestKVMBinpackStepOpts_Validate(t *testing.T) { + tests := []struct { + name string + opts KVMBinpackStepOpts + wantErr bool + errMsg string + }{ + { + name: "valid opts with memory and cpu weights", + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceMemory: 1.0, + corev1.ResourceCPU: 1.0, + }, + }, + wantErr: false, + }, + { + name: "inverted weights should raise error", + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceMemory: -1.0, + corev1.ResourceCPU: -1.0, + }, + }, + wantErr: true, + }, + { + name: "zero weights should raise error", + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceMemory: 0.0, + corev1.ResourceCPU: 0.0, + }, + }, + wantErr: true, + }, + { + name: "valid opts with only memory weight", + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceMemory: 2.0, + }, + }, + wantErr: false, + }, + { + name: "valid opts with only cpu weight", + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceCPU: 0.5, + }, + }, + wantErr: false, + }, + { + name: "zero weights should raise error", + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceMemory: 0.0, + corev1.ResourceCPU: 0.0, + }, + }, + wantErr: true, + }, + { + name: "invalid opts with empty resource weights", + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{}, + }, + wantErr: true, + }, + { + name: "invalid opts with nil resource weights", + opts: KVMBinpackStepOpts{}, + wantErr: true, + }, + { + name: "invalid opts with 
unsupported resource", + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceStorage: 1.0, + }, + }, + wantErr: true, + errMsg: "unsupported resource", + }, + { + name: "invalid opts with unsupported ephemeral-storage resource", + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceEphemeralStorage: 1.0, + }, + }, + wantErr: true, + errMsg: "unsupported resource", + }, + { + name: "invalid opts with custom unsupported resource", + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + "nvidia.com/gpu": 1.0, + }, + }, + wantErr: true, + errMsg: "unsupported resource", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := tt.opts.Validate() + if tt.wantErr { + if err == nil { + t.Errorf("expected error, got nil") + } else if tt.errMsg != "" && !strings.Contains(err.Error(), tt.errMsg) { + t.Errorf("expected error containing %q, got %q", tt.errMsg, err.Error()) + } + } else { + if err != nil { + t.Errorf("expected no error, got %v", err) + } + } + }) + } +} + +func TestKVMBinpackStep_Run(t *testing.T) { + scheme := buildTestScheme(t) + + tests := []struct { + name string + hypervisors []*hv1.Hypervisor + request api.ExternalSchedulerRequest + opts KVMBinpackStepOpts + expectedWeights map[string]float64 + wantErr bool + }{ + { + name: "basic binpacking with memory weight only", + hypervisors: []*hv1.Hypervisor{ + // host1: capacity 100Gi, allocation (free) 80Gi -> used 20Gi, adding 8Gi VM -> 28Gi used + // utilization after VM = 28/100 = 0.28 + newHypervisor("host1", "100", "100Gi", "80", "80Gi"), + // host2: capacity 100Gi, allocation (free) 20Gi -> used 80Gi, adding 8Gi VM -> 88Gi used + // utilization after VM = 88/100 = 0.88 + newHypervisor("host2", "100", "100Gi", "20", "20Gi"), + }, + request: newBinpackRequest(8192, 4, 1, []string{"host1", "host2"}), // 8Gi memory + opts: KVMBinpackStepOpts{ + ResourceWeights: 
map[corev1.ResourceName]float64{ + corev1.ResourceMemory: 1.0, + }, + }, + expectedWeights: map[string]float64{ // with 0.1 tolerance + "host1": 0.3, + "host2": 0.9, + }, + wantErr: false, + }, + { + name: "basic binpacking with cpu weight only", + hypervisors: []*hv1.Hypervisor{ + // host1: capacity 100 CPUs, allocation (free) 80 CPUs -> used 20 CPUs + newHypervisor("host1", "100", "100Gi", "80", "80Gi"), + // host2: capacity 100 CPUs, allocation (free) 20 CPUs -> used 80 CPUs + newHypervisor("host2", "100", "100Gi", "20", "20Gi"), + }, + request: newBinpackRequest(8192, 4, 1, []string{"host1", "host2"}), + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceCPU: 1.0, + }, + }, + expectedWeights: map[string]float64{ // with 0.1 tolerance + "host1": 0.3, + "host2": 0.8, + }, + wantErr: false, + }, + { + name: "binpacking with both cpu and memory weights", + hypervisors: []*hv1.Hypervisor{ + newHypervisor("host1", "100", "100Gi", "80", "80Gi"), + newHypervisor("host2", "100", "100Gi", "20", "20Gi"), + }, + request: newBinpackRequest(8192, 4, 1, []string{"host1", "host2"}), + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceCPU: 1.0, + corev1.ResourceMemory: 1.0, + }, + }, + expectedWeights: map[string]float64{ // with 0.1 tolerance + "host1": 0.26, + "host2": 0.86, + }, + wantErr: false, + }, + { + name: "binpacking with different weights for cpu and memory", + hypervisors: []*hv1.Hypervisor{ + newHypervisor("host1", "100", "100Gi", "80", "80Gi"), + }, + request: newBinpackRequest(8192, 4, 1, []string{"host1"}), + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceCPU: 2.0, + corev1.ResourceMemory: 1.0, + }, + }, + expectedWeights: map[string]float64{ // with 0.1 tolerance + "host1": 0.25, + }, + wantErr: false, + }, + { + name: "binpacking with multiple instances", + hypervisors: []*hv1.Hypervisor{ + newHypervisor("host1", "100", 
"100Gi", "80", "80Gi"), + }, + request: newBinpackRequest(8192, 4, 2, []string{"host1"}), // 2 instances + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceCPU: 1.0, + }, + }, + expectedWeights: map[string]float64{ // with 0.1 tolerance + "host1": 0.3, + }, + wantErr: false, + }, + { + name: "no hypervisors found - hosts skipped", + hypervisors: []*hv1.Hypervisor{}, + request: newBinpackRequest(8192, 4, 1, []string{"host1", "host2"}), + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceCPU: 1.0, + }, + }, + expectedWeights: map[string]float64{ + // Both hosts should have default weight (0) since no hypervisors found + "host1": 0, + "host2": 0, + }, + wantErr: false, + }, + { + name: "hypervisor missing for one host", + hypervisors: []*hv1.Hypervisor{ + newHypervisor("host1", "100", "100Gi", "80", "80Gi"), + // host2 hypervisor is missing + }, + request: newBinpackRequest(8192, 4, 1, []string{"host1", "host2"}), + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceCPU: 1.0, + }, + }, + expectedWeights: map[string]float64{ + "host1": 0.24, + "host2": 0, // Default weight since no hypervisor + }, + wantErr: false, + }, + { + name: "empty resource weights - no scoring", + hypervisors: []*hv1.Hypervisor{ + newHypervisor("host1", "100", "100Gi", "80", "80Gi"), + }, + request: newBinpackRequest(8192, 4, 1, []string{"host1"}), + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{}, + }, + expectedWeights: map[string]float64{ + "host1": 0, // No weights configured, score is 0 + }, + wantErr: false, + }, + { + name: "hypervisor with zero capacity - skipped", + hypervisors: []*hv1.Hypervisor{ + { + ObjectMeta: metav1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capacity: map[string]resource.Quantity{ + "cpu": resource.MustParse("0"), + "memory": resource.MustParse("100Gi"), + }, + Allocation: 
map[string]resource.Quantity{ + "cpu": resource.MustParse("0"), + "memory": resource.MustParse("80Gi"), + }, + }, + }, + }, + request: newBinpackRequest(8192, 4, 1, []string{"host1"}), + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceCPU: 1.0, + }, + }, + expectedWeights: map[string]float64{ + "host1": 0, // CPU capacity is zero, skipped + }, + wantErr: false, + }, + { + name: "hypervisor missing allocation for resource", + hypervisors: []*hv1.Hypervisor{ + { + ObjectMeta: metav1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capacity: map[string]resource.Quantity{ + "cpu": resource.MustParse("100"), + }, + Allocation: map[string]resource.Quantity{ + // No CPU allocation + }, + }, + }, + }, + request: newBinpackRequest(8192, 4, 1, []string{"host1"}), + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceCPU: 1.0, + }, + }, + expectedWeights: map[string]float64{ + "host1": 0, // No allocation data, skipped + }, + wantErr: false, + }, + { + name: "hypervisor missing capacity for resource", + hypervisors: []*hv1.Hypervisor{ + { + ObjectMeta: metav1.ObjectMeta{Name: "host1"}, + Status: hv1.HypervisorStatus{ + Capacity: map[string]resource.Quantity{ + // No CPU capacity + }, + Allocation: map[string]resource.Quantity{ + "cpu": resource.MustParse("80"), + }, + }, + }, + }, + request: newBinpackRequest(8192, 4, 1, []string{"host1"}), + opts: KVMBinpackStepOpts{ + ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceCPU: 1.0, + }, + }, + expectedWeights: map[string]float64{ + "host1": 0, // No capacity data, skipped + }, + wantErr: false, + }, + { + name: "high utilization scenario (over 100%)", + hypervisors: []*hv1.Hypervisor{ + // Host with very little free resources + newHypervisor("host1", "10", "10Gi", "1", "1Gi"), + }, + request: newBinpackRequest(20480, 20, 1, []string{"host1"}), // 20Gi, 20 CPUs - more than available + opts: KVMBinpackStepOpts{ + 
ResourceWeights: map[corev1.ResourceName]float64{ + corev1.ResourceCPU: 1.0, + }, + }, + expectedWeights: map[string]float64{ + // (10 - 1 + 20) / 10 = 29/10 = 2.9 (over 100%) + "host1": 2.9, + }, + wantErr: false, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + objects := make([]client.Object, 0, len(tt.hypervisors)) + for _, hv := range tt.hypervisors { + objects = append(objects, hv) + } + + step := &KVMBinpackStep{} + step.Client = fake.NewClientBuilder().WithScheme(scheme).WithObjects(objects...).Build() + step.Options = tt.opts + + result, err := step.Run(slog.Default(), tt.request) + if tt.wantErr { + if err == nil { + t.Fatalf("expected error, got nil") + } + return + } + if err != nil { + t.Fatalf("expected no error, got %v", err) + } + + for host, expectedWeight := range tt.expectedWeights { + actualWeight, ok := result.Activations[host] + if !ok { + t.Errorf("expected host %s to be in activations", host) + continue + } + diff := actualWeight - expectedWeight + if diff < 0 { + diff = -diff + } + if diff > 0.1 { // tolerance of 0.1 + t.Errorf("for host %s, expected weight approximately %.2f, got %.2f", host, expectedWeight, actualWeight) + } + } + + // Verify statistics are populated + if _, ok := result.Statistics["binpack score"]; !ok { + t.Error("expected statistics to contain 'binpack score'") + } + }) + } +} + +func TestKVMBinpackStep_calcVMResources(t *testing.T) { + tests := []struct { + name string + request api.ExternalSchedulerRequest + expectedMemBytes int64 + expectedCPU int64 + }{ + { + name: "single instance with 8G memory and 4 CPUs", + request: newBinpackRequest(8192, 4, 1, []string{"host1"}), + expectedMemBytes: 8192 * 1_000_000, + expectedCPU: 4, + }, + { + name: "multiple instances", + request: newBinpackRequest(4096, 2, 3, []string{"host1"}), + expectedMemBytes: 4096 * 1_000_000 * 3, + expectedCPU: 2 * 3, + }, + { + name: "zero memory", + request: newBinpackRequest(0, 4, 1, []string{"host1"}), + 
expectedMemBytes: 0, + expectedCPU: 4, + }, + { + name: "zero CPUs", + request: newBinpackRequest(8192, 0, 1, []string{"host1"}), + expectedMemBytes: 8192 * 1_000_000, + expectedCPU: 0, + }, + { + name: "large values", + request: newBinpackRequest(524288, 128, 10, []string{"host1"}), // 512Gi, 128 CPUs, 10 instances + expectedMemBytes: 524288 * 1_000_000 * 10, + expectedCPU: 128 * 10, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + step := &KVMBinpackStep{} + resources := step.calcVMResources(tt.request) + + memResource, ok := resources[corev1.ResourceMemory] + if !ok { + t.Error("expected memory resource to be present") + } else { + actualMem := memResource.Value() + if actualMem != tt.expectedMemBytes { + t.Errorf("expected memory %d bytes, got %d", tt.expectedMemBytes, actualMem) + } + } + + cpuResource, ok := resources[corev1.ResourceCPU] + if !ok { + t.Error("expected CPU resource to be present") + } else { + actualCPU := cpuResource.Value() + if actualCPU != tt.expectedCPU { + t.Errorf("expected CPU %d, got %d", tt.expectedCPU, actualCPU) + } + } + }) + } +} + +func TestKVMBinpackStep_IndexRegistration(t *testing.T) { + factory, ok := Index["kvm_binpack"] + if !ok { + t.Fatal("kvm_binpack not found in Index") + } + + weigher := factory() + if weigher == nil { + t.Fatal("factory returned nil weigher") + } + + _, ok = weigher.(*KVMBinpackStep) + if !ok { + t.Fatalf("expected *KVMBinpackStep, got %T", weigher) + } +}