diff --git a/CHANGELOG.md b/CHANGELOG.md index eef43573d9c..eee513b713c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -79,6 +79,7 @@ Here is an overview of all new **experimental** features: - **General**: Prevent multiple ScaledObjects managing one HPA ([#6130](https://github.com/kedacore/keda/issues/6130)) - **General**: Show full triggers'types and authentications'types in status ([#6187](https://github.com/kedacore/keda/issues/6187)) - **AWS CloudWatch Scaler**: Add support for ignoreNullValues ([#5352](https://github.com/kedacore/keda/issues/5352)) +- **CPU/Memory Scaler**: Add activation feature ([#6057](https://github.com/kedacore/keda/issues/6057)) - **Elasticsearch Scaler**: Support Query at the Elasticsearch scaler ([#6216](https://github.com/kedacore/keda/issues/6216)) - **Etcd Scaler**: Add username and password support for etcd ([#6199](https://github.com/kedacore/keda/pull/6199)) - **GCP Scalers**: Added custom time horizon in GCP scalers ([#5778](https://github.com/kedacore/keda/issues/5778)) diff --git a/cmd/operator/main.go b/cmd/operator/main.go index 16f6899d230..c7652b0a6fe 100644 --- a/cmd/operator/main.go +++ b/cmd/operator/main.go @@ -30,6 +30,7 @@ import ( clientgoscheme "k8s.io/client-go/kubernetes/scheme" _ "k8s.io/client-go/plugin/pkg/client/auth" "k8s.io/client-go/tools/cache" + metricsv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" ctrl "sigs.k8s.io/controller-runtime" ctrlcache "sigs.k8s.io/controller-runtime/pkg/cache" "sigs.k8s.io/controller-runtime/pkg/controller" @@ -59,7 +60,7 @@ var ( func init() { utilruntime.Must(clientgoscheme.AddToScheme(scheme)) - + utilruntime.Must(metricsv1beta1.AddToScheme(scheme)) utilruntime.Must(kedav1alpha1.AddToScheme(scheme)) utilruntime.Must(eventingv1alpha1.AddToScheme(scheme)) //+kubebuilder:scaffold:scheme @@ -217,7 +218,13 @@ func main() { os.Exit(1) } - scaledHandler := scaling.NewScaleHandler(mgr.GetClient(), scaleClient, mgr.GetScheme(), globalHTTPTimeout, eventRecorder, secretInformer.Lister()) + podMetricsClient, err := k8s.InitPodMetricsClient(mgr) + if err != nil { + setupLog.Error(err, "unable to init metrics client") + os.Exit(1) + } + + scaledHandler := scaling.NewScaleHandler(mgr.GetClient(), scaleClient, podMetricsClient, mgr.GetScheme(), globalHTTPTimeout, eventRecorder, secretInformer.Lister()) eventEmitter := eventemitter.NewEventEmitter(mgr.GetClient(), eventRecorder, k8sClusterName, secretInformer.Lister()) if err = (&kedacontrollers.ScaledObjectReconciler{ diff --git a/controllers/keda/scaledjob_controller.go b/controllers/keda/scaledjob_controller.go index 7ffc6ed04f2..3c28621f734 100755 --- a/controllers/keda/scaledjob_controller.go +++ b/controllers/keda/scaledjob_controller.go @@ -83,7 +83,7 @@ func init() { // SetupWithManager initializes the ScaledJobReconciler instance and starts a new controller managed by the passed Manager instance. func (r *ScaledJobReconciler) SetupWithManager(mgr ctrl.Manager, options controller.Options) error { - r.scaleHandler = scaling.NewScaleHandler(mgr.GetClient(), nil, mgr.GetScheme(), r.GlobalHTTPTimeout, mgr.GetEventRecorderFor("scale-handler"), r.SecretsLister) + r.scaleHandler = scaling.NewScaleHandler(mgr.GetClient(), nil, nil, mgr.GetScheme(), r.GlobalHTTPTimeout, mgr.GetEventRecorderFor("scale-handler"), r.SecretsLister) r.scaledJobGenerations = &sync.Map{} return ctrl.NewControllerManagedBy(mgr). WithOptions(options). 
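For context on the new podMetricsClient parameter threaded through NewScaleHandler above: the sketch below is not part of this patch (the helper name listPodCPUUsage and the namespace argument are illustrative assumptions); it only shows how a PodMetricsesGetter such as the one returned by k8s.InitPodMetricsClient can be used to read per-container CPU usage from the metrics server.

package kedaexample

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
)

// listPodCPUUsage lists pod metrics in the given namespace and prints the CPU
// usage reported for each container (illustrative helper, not used by KEDA).
func listPodCPUUsage(ctx context.Context, podMetricsClient metricsv1beta1.PodMetricsesGetter, namespace string) error {
	podMetricsList, err := podMetricsClient.PodMetricses(namespace).List(ctx, metav1.ListOptions{})
	if err != nil {
		return fmt.Errorf("failed to list pod metrics: %w", err)
	}
	for _, podMetrics := range podMetricsList.Items {
		for _, containerMetrics := range podMetrics.Containers {
			// Usage is a corev1.ResourceList; Cpu() returns the CPU quantity.
			fmt.Printf("%s/%s cpu=%s\n", podMetrics.Name, containerMetrics.Name, containerMetrics.Usage.Cpu().String())
		}
	}
	return nil
}

ScaledJobs do not use this path, which is why scaledjob_controller.go above passes nil for the new argument.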
diff --git a/controllers/keda/scaledobject_controller_test.go b/controllers/keda/scaledobject_controller_test.go index 059025fc2ab..a09ca0c1f4f 100644 --- a/controllers/keda/scaledobject_controller_test.go +++ b/controllers/keda/scaledobject_controller_test.go @@ -790,13 +790,16 @@ var _ = Describe("ScaledObjectController", func() { Eventually(func() error { return k8sClient.Get(context.Background(), types.NamespacedName{Name: getHPAName(so), Namespace: "default"}, hpa) }).ShouldNot(HaveOccurred()) + + averageUtilization := int32(100) hpa.Status.CurrentMetrics = []autoscalingv2.MetricStatus{ { Type: autoscalingv2.ResourceMetricSourceType, Resource: &autoscalingv2.ResourceMetricStatus{ Name: corev1.ResourceCPU, Current: autoscalingv2.MetricValueStatus{ - Value: resource.NewQuantity(int64(100), resource.DecimalSI), + Value: resource.NewQuantity(int64(100), resource.DecimalSI), + AverageUtilization: &averageUtilization, }, }, }, diff --git a/controllers/keda/suite_test.go b/controllers/keda/suite_test.go index 482a45b135c..14204113c6c 100644 --- a/controllers/keda/suite_test.go +++ b/controllers/keda/suite_test.go @@ -91,10 +91,13 @@ var _ = BeforeSuite(func() { scaleClient, _, err := k8s.InitScaleClient(k8sManager) Expect(err).ToNot(HaveOccurred()) + metricsClient, err := k8s.InitPodMetricsClient(k8sManager) + Expect(err).ToNot(HaveOccurred()) + err = (&ScaledObjectReconciler{ Client: k8sManager.GetClient(), Scheme: k8sManager.GetScheme(), - ScaleHandler: scaling.NewScaleHandler(k8sManager.GetClient(), scaleClient, k8sManager.GetScheme(), time.Duration(10), k8sManager.GetEventRecorderFor("keda-operator"), nil), + ScaleHandler: scaling.NewScaleHandler(k8sManager.GetClient(), scaleClient, metricsClient, k8sManager.GetScheme(), time.Duration(10), k8sManager.GetEventRecorderFor("keda-operator"), nil), ScaleClient: scaleClient, EventEmitter: eventemitter.NewEventEmitter(k8sManager.GetClient(), k8sManager.GetEventRecorderFor("keda-operator"), "kubernetes-default", nil), }).SetupWithManager(k8sManager, controller.Options{}) diff --git a/pkg/k8s/podmetrics_client.go b/pkg/k8s/podmetrics_client.go new file mode 100644 index 00000000000..f564c808abd --- /dev/null +++ b/pkg/k8s/podmetrics_client.go @@ -0,0 +1,21 @@ +package k8s + +import ( + "fmt" + + "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" + ctrl "sigs.k8s.io/controller-runtime" +) + +var metricsClientLog = ctrl.Log.WithName("metricsclient") + +// InitPodMetricsClient initializes the client for pod metrics. It is used to fetch pod metrics from the metrics server. 
+func InitPodMetricsClient(mgr ctrl.Manager) (v1beta1.PodMetricsesGetter, error) { + clientset, err := v1beta1.NewForConfig(mgr.GetConfig()) + if err != nil { + metricsClientLog.Error(err, "not able to create metrics client") + return nil, fmt.Errorf("failed to create metrics clientset: %w", err) + } + + return clientset, nil +} diff --git a/pkg/scalers/cpu_memory_scaler.go b/pkg/scalers/cpu_memory_scaler.go index 8da440ab77e..5664b9e2127 100644 --- a/pkg/scalers/cpu_memory_scaler.go +++ b/pkg/scalers/cpu_memory_scaler.go @@ -6,48 +6,109 @@ import ( "strconv" "github.com/go-logr/logr" + appsv1 "k8s.io/api/apps/v1" v2 "k8s.io/api/autoscaling/v2" - v1 "k8s.io/api/core/v1" + corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/apimachinery/pkg/runtime/schema" + "k8s.io/apimachinery/pkg/types" "k8s.io/metrics/pkg/apis/external_metrics" + "k8s.io/metrics/pkg/apis/metrics/v1beta1" + metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client" + kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" ) type cpuMemoryScaler struct { - metadata cpuMemoryMetadata - resourceName v1.ResourceName - logger logr.Logger + metadata *cpuMemoryMetadata + resourceName corev1.ResourceName + logger logr.Logger + kubeClient client.Client + podMetricsClient metricsv1beta1.PodMetricsesGetter } +const ( + cpuMetricName = "cpu" + memoryMetricName = "memory" + defaultActivationValue = "0" + scaledObjectType = "ScaledObject" +) + type cpuMemoryMetadata struct { - Type string `keda:"name=type, order=triggerMetadata, enum=Utilization;AverageValue, optional"` - Value string `keda:"name=value, order=triggerMetadata"` - ContainerName string `keda:"name=containerName, order=triggerMetadata, optional"` - AverageValue *resource.Quantity - AverageUtilization *int32 - MetricType v2.MetricTargetType + Type string `keda:"name=type, order=triggerMetadata, enum=Utilization;AverageValue, optional"` + Value string `keda:"name=value, order=triggerMetadata"` + ActivationValue string `keda:"name=activationValue, order=triggerMetadata, optional"` + ContainerName string `keda:"name=containerName, order=triggerMetadata, optional"` + MetricType v2.MetricTargetType + AverageValue *resource.Quantity + AverageUtilization *int32 + ActivationAverageValue *resource.Quantity + ActivationAverageUtilization *int32 + ScalableObjectType string + Namespace string + ScaleTargetGVK schema.GroupVersionKind + ScaleTargetNamespacedName types.NamespacedName } // NewCPUMemoryScaler creates a new cpuMemoryScaler -func NewCPUMemoryScaler(resourceName v1.ResourceName, config *scalersconfig.ScalerConfig) (Scaler, error) { +func NewCPUMemoryScaler(resourceName corev1.ResourceName, config *scalersconfig.ScalerConfig, kubeClient client.Client, podMetricsClient metricsv1beta1.PodMetricsesGetter) (Scaler, error) { logger := InitializeLogger(config, "cpu_memory_scaler") - meta, err := parseResourceMetadata(config, logger) + meta, err := parseResourceMetadata(config, logger, kubeClient) if err != nil { return nil, fmt.Errorf("error parsing %s metadata: %w", resourceName, err) } return &cpuMemoryScaler{ - metadata: meta, - resourceName: resourceName, - logger: logger, + metadata: meta, + resourceName: resourceName, + logger: logger, + kubeClient: kubeClient, + podMetricsClient: podMetricsClient, }, nil } -func 
parseResourceMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger) (cpuMemoryMetadata, error) { - meta := cpuMemoryMetadata{} - err := config.TypedConfig(&meta) +// getScaleTarget retrieves the GVK and its namespaced name of the scale target (e.g., Deployment, StatefulSet) +// from a ScaledObject. It returns an error if the ScaledObject is not found or if it doesn't +// have a scale target reference. +func getScaleTarget(scalableObjectName, scalableObjectNamespace, scalableObjectType string, kubeClient client.Client) (schema.GroupVersionKind, types.NamespacedName, error) { + if scalableObjectType != scaledObjectType { + return schema.GroupVersionKind{}, types.NamespacedName{}, fmt.Errorf("unsupported scalable object type: %s", scalableObjectType) + } + + scaledObject := &kedav1alpha1.ScaledObject{} + err := kubeClient.Get(context.Background(), types.NamespacedName{ + Name: scalableObjectName, + Namespace: scalableObjectNamespace, + }, scaledObject) + + if err != nil { + return schema.GroupVersionKind{}, types.NamespacedName{}, err + } + + if scaledObject.Spec.ScaleTargetRef == nil { + return schema.GroupVersionKind{}, types.NamespacedName{}, fmt.Errorf("scaled object %s has no scale target ref", scalableObjectName) + } + + gvk := scaledObject.Status.ScaleTargetGVKR.GroupVersionKind() + namespacedName := types.NamespacedName{ + Name: scaledObject.Spec.ScaleTargetRef.Name, + Namespace: scaledObject.Namespace, + } + + return gvk, namespacedName, nil +} + +func parseResourceMetadata(config *scalersconfig.ScalerConfig, logger logr.Logger, kubeClient client.Client) (*cpuMemoryMetadata, error) { + meta := &cpuMemoryMetadata{ + ActivationValue: defaultActivationValue, + } + + err := config.TypedConfig(meta) if err != nil { return meta, err } @@ -73,19 +134,42 @@ func parseResourceMetadata(config *scalersconfig.ScalerConfig, logger logr.Logge case v2.AverageValueMetricType: averageValueQuantity := resource.MustParse(meta.Value) meta.AverageValue = &averageValueQuantity + + activationValueQuantity := resource.MustParse(meta.ActivationValue) + meta.ActivationAverageValue = &activationValueQuantity case v2.UtilizationMetricType: utilizationNum, err := parseUtilization(meta.Value) if err != nil { return meta, err } meta.AverageUtilization = utilizationNum + + utilizationNum, err = parseUtilization(meta.ActivationValue) + if err != nil { + return meta, err + } + meta.ActivationAverageUtilization = utilizationNum default: return meta, fmt.Errorf("unknown metric type: %s, allowed values are 'Utilization' or 'AverageValue'", string(meta.MetricType)) } + if config.ScalableObjectType == "ScaledObject" { + gvk, namespacedName, err := getScaleTarget(config.ScalableObjectName, config.ScalableObjectNamespace, config.ScalableObjectType, kubeClient) + if err != nil { + return nil, err + } + + meta.ScaleTargetGVK = gvk + meta.ScaleTargetNamespacedName = namespacedName + } + + meta.ScalableObjectType = config.ScalableObjectType + meta.Namespace = config.ScalableObjectNamespace + return meta, nil } +// parseUtilization parses the utilization value string and returns it as an int32 func parseUtilization(value string) (*int32, error) { valueNum, err := strconv.ParseInt(value, 10, 32) if err != nil { @@ -131,7 +215,311 @@ func (s *cpuMemoryScaler) GetMetricSpecForScaling(context.Context) []v2.MetricSp return []v2.MetricSpec{metricSpec} } -// GetMetricsAndActivity no need for cpu/memory scaler and always active for cpu/memory scaler -func (s *cpuMemoryScaler) GetMetricsAndActivity(_ context.Context, _ string) 
([]external_metrics.ExternalMetricValue, bool, error) { - return nil, true, nil +// calculateAverage computes the average value for the given total resource quantity and count +func calculateAverage(total *resource.Quantity, count int64) *resource.Quantity { + if count == 0 { + return &resource.Quantity{} + } + + // Convert the total to nano-units + nanoValue := total.ScaledValue(resource.Nano) + + // Perform the division + averageNanoValue := nanoValue / count + + // Create a new Quantity from the average nano-value + return resource.NewScaledQuantity(averageNanoValue, resource.Nano) +} + +// getAverageValue calculates the average resource (CPU/Memory) usage across all running pods +// that match the scaler's target +func (s *cpuMemoryScaler) getAverageValue(ctx context.Context, metricName string) (*resource.Quantity, error) { + podList, labelSelector, err := s.getPodList(ctx) + if err != nil { + return nil, err + } + + if podList == nil || labelSelector == nil { + var q resource.Quantity + return &q, nil + } + + podMetricsList, err := s.getPodMetricsList(ctx, labelSelector) + if err != nil { + return nil, err + } + + totalValue := &resource.Quantity{} + podCount := 0 + + for _, pod := range podList.Items { + if pod.Status.Phase != corev1.PodRunning { + continue + } + + podMetrics := getPodMetrics(podMetricsList, pod.Name) + if podMetrics == nil { + continue + } + + var metricValue *resource.Quantity + if s.metadata.ContainerName != "" { + containerMetrics := getContainerMetrics(podMetrics, s.metadata.ContainerName) + if containerMetrics == nil { + continue + } + metricValue = getResourceValue(containerMetrics, metricName) + } else { + metricValue = getPodResourceValue(podMetrics, metricName) + } + + if metricValue == nil { + return nil, fmt.Errorf("unsupported metric name: %s", metricName) + } + + totalValue.Add(*metricValue) + podCount++ + } + + if podCount == 0 { + return nil, fmt.Errorf("no running pods found") + } + + averageValue := calculateAverage(totalValue, int64(podCount)) + return averageValue, nil +} + +// getAverageUtilization calculates the average resource utilization (as a percentage) across +// all running pods that match the scaler's target +func (s *cpuMemoryScaler) getAverageUtilization(ctx context.Context, metricName string) (*int32, error) { + podList, labelSelector, err := s.getPodList(ctx) + if err != nil { + return nil, err + } + + if podList == nil || labelSelector == nil { + var p int32 + return &p, nil + } + + podMetricsList, err := s.getPodMetricsList(ctx, labelSelector) + if err != nil { + return nil, err + } + + var totalUtilization int64 + podCount := 0 + + for _, pod := range podList.Items { + if pod.Status.Phase != corev1.PodRunning { + continue + } + + podMetrics := getPodMetrics(podMetricsList, pod.Name) + if podMetrics == nil { + continue + } + + var metricValue, capacity int64 + if s.metadata.ContainerName != "" { + containerMetrics := getContainerMetrics(podMetrics, s.metadata.ContainerName) + if containerMetrics == nil { + continue + } + metricValue = getResourceValueInMillis(containerMetrics, metricName) + capacity = getContainerResourceCapacity(&pod, s.metadata.ContainerName, getResourceName(metricName)) + } else { + metricValue = getPodResourceValueInMillis(podMetrics, metricName) + capacity = getPodResourceCapacity(&pod, getResourceName(metricName)) + } + + if capacity == 0 { + continue + } + + utilization := (metricValue * 100) / capacity + totalUtilization += utilization + podCount++ + } + + if podCount == 0 { + return nil,
fmt.Errorf("no running pods found with non-zero capacity") + } + + averageUtilization := int32(totalUtilization / int64(podCount)) + return &averageUtilization, nil +} + +// getResourceValue extracts the appropriate resource (CPU/Memory) value from container metrics +func getResourceValue(containerMetrics *v1beta1.ContainerMetrics, metricName string) *resource.Quantity { + switch metricName { + case cpuMetricName: + return containerMetrics.Usage.Cpu() + case memoryMetricName: + return containerMetrics.Usage.Memory() + default: + return nil + } +} + +// getPodResourceValue calculates the total resource usage for a pod by summing +// all of its containers' resource usage +func getPodResourceValue(podMetrics *v1beta1.PodMetrics, metricName string) *resource.Quantity { + var total resource.Quantity + for _, container := range podMetrics.Containers { + if value := getResourceValue(&container, metricName); value != nil { + total.Add(*value) + } + } + return &total +} + +// getResourceValueInMillis returns the resource value in milli-units for CPU +// or bytes for memory +func getResourceValueInMillis(containerMetrics *v1beta1.ContainerMetrics, metricName string) int64 { + switch metricName { + case cpuMetricName: + return containerMetrics.Usage.Cpu().MilliValue() + case memoryMetricName: + return containerMetrics.Usage.Memory().Value() + default: + return 0 + } +} + +// getPodResourceCapacity calculates the total resource capacity (requests) for a pod +// by summing the requests of all its containers +func getPodResourceValueInMillis(podMetrics *v1beta1.PodMetrics, metricName string) int64 { + var total int64 + for _, container := range podMetrics.Containers { + total += getResourceValueInMillis(&container, metricName) + } + return total +} + +func getResourceName(metricName string) corev1.ResourceName { + switch metricName { + case cpuMetricName: + return corev1.ResourceCPU + case memoryMetricName: + return corev1.ResourceMemory + default: + return "" + } +} + +// getPodResourceCapacity calculates the total resource capacity (requests) for a pod +// by summing the requests of all its containers +func getPodResourceCapacity(pod *corev1.Pod, resourceName corev1.ResourceName) int64 { + var total int64 + for _, container := range pod.Spec.Containers { + if quantity, ok := container.Resources.Requests[resourceName]; ok { + total += quantity.MilliValue() + } + } + return total +} + +// getPodList retrieves the list of pods matching the scale target's selector +func (s *cpuMemoryScaler) getPodList(ctx context.Context) (*corev1.PodList, labels.Selector, error) { + var labelSelector labels.Selector + + switch { + case s.metadata.ScaleTargetGVK.Group == "apps" && s.metadata.ScaleTargetGVK.Kind == "Deployment": + deployment := &appsv1.Deployment{} + err := s.kubeClient.Get(ctx, s.metadata.ScaleTargetNamespacedName, deployment) + if err != nil { + return nil, nil, fmt.Errorf("failed to get deployment: %w", err) + } + + labelSelector = labels.SelectorFromSet(deployment.Spec.Selector.MatchLabels) + case s.metadata.ScaleTargetGVK.Group == "apps" && s.metadata.ScaleTargetGVK.Kind == "StatefulSet": + statefulSet := &appsv1.StatefulSet{} + err := s.kubeClient.Get(ctx, s.metadata.ScaleTargetNamespacedName, statefulSet) + if err != nil { + return nil, nil, fmt.Errorf("failed to get statefulset: %w", err) + } + + labelSelector = labels.SelectorFromSet(statefulSet.Spec.Selector.MatchLabels) + default: + return nil, nil, nil + } + + podList := &corev1.PodList{} + err := s.kubeClient.List(ctx, podList, 
&client.ListOptions{ + Namespace: s.metadata.Namespace, + LabelSelector: labelSelector, + }) + + if err != nil { + return nil, nil, fmt.Errorf("failed to list pods: %w", err) + } + + return podList, labelSelector, nil +} + +// getPodMetricsList retrieves metrics for pods matching the given label selector +// using the pod metrics client +func (s *cpuMemoryScaler) getPodMetricsList(ctx context.Context, labelSelector labels.Selector) (*v1beta1.PodMetricsList, error) { + podsMetricsList, err := s.podMetricsClient.PodMetricses(s.metadata.Namespace).List(ctx, metav1.ListOptions{ + LabelSelector: labelSelector.String(), + }) + + return podsMetricsList, err +} + +func getPodMetrics(podMetricsList *v1beta1.PodMetricsList, podName string) *v1beta1.PodMetrics { + for _, podMetrics := range podMetricsList.Items { + if podMetrics.Name == podName { + return &podMetrics + } + } + return nil +} + +func getContainerMetrics(podMetrics *v1beta1.PodMetrics, containerName string) *v1beta1.ContainerMetrics { + for _, containerMetrics := range podMetrics.Containers { + if containerMetrics.Name == containerName { + return &containerMetrics + } + } + return nil +} + +// getContainerResourceCapacity retrieves the resource capacity (requests) for a specific container within a pod +func getContainerResourceCapacity(pod *corev1.Pod, containerName string, resourceName corev1.ResourceName) int64 { + for _, container := range pod.Spec.Containers { + if container.Name == containerName { + if quantity, ok := container.Resources.Requests[resourceName]; ok { + return quantity.MilliValue() + } + } + } + return 0 +} + +// GetMetricsAndActivity only returns the activity of the cpu/memory scaler +func (s *cpuMemoryScaler) GetMetricsAndActivity(ctx context.Context, metricName string) ([]external_metrics.ExternalMetricValue, bool, error) { + if s.metadata.ScalableObjectType == "ScaledJob" { + return nil, false, nil + } + + switch s.metadata.MetricType { + case v2.AverageValueMetricType: + averageValue, err := s.getAverageValue(ctx, metricName) + if err != nil { + return nil, false, err + } + + return nil, averageValue.Cmp(*s.metadata.ActivationAverageValue) == 1, nil + case v2.UtilizationMetricType: + averageUtilization, err := s.getAverageUtilization(ctx, metricName) + if err != nil { + return nil, false, err + } + + return nil, *averageUtilization > *s.metadata.ActivationAverageUtilization, nil + } + + return nil, false, fmt.Errorf("no matching resource metric found for %s", s.resourceName) } diff --git a/pkg/scalers/cpu_memory_scaler_test.go b/pkg/scalers/cpu_memory_scaler_test.go index 78f662de247..bd2f83ec2c6 100644 --- a/pkg/scalers/cpu_memory_scaler_test.go +++ b/pkg/scalers/cpu_memory_scaler_test.go @@ -6,9 +6,18 @@ import ( "github.com/go-logr/logr" "github.com/stretchr/testify/assert" + appsv1 "k8s.io/api/apps/v1" v2 "k8s.io/api/autoscaling/v2" v1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/resource" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/watch" + "k8s.io/client-go/kubernetes/scheme" + metrics "k8s.io/metrics/pkg/apis/metrics/v1beta1" + metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" "github.com/kedacore/keda/v2/pkg/scalers/scalersconfig" ) @@ -19,8 +28,9 @@ type parseCPUMemoryMetadataTestData struct { } var validCPUMemoryMetadata = map[string]string{ - "type": "Utilization", - "value": "50", + "type": "Utilization", + "value": 
"50", + "activationValue": "40", } var validContainerCPUMemoryMetadata = map[string]string{ "type": "Utilization", @@ -36,12 +46,195 @@ var testCPUMemoryMetadata = []parseCPUMemoryMetadataTestData{ {v2.UtilizationMetricType, map[string]string{"value": "50"}, false}, {"", map[string]string{"type": "AverageValue", "value": "50"}, false}, {v2.AverageValueMetricType, map[string]string{"value": "50"}, false}, + {"", map[string]string{"type": "AverageValue", "value": "50", "activationValue": "40"}, false}, {"", map[string]string{"type": "Value", "value": "50"}, true}, {v2.ValueMetricType, map[string]string{"value": "50"}, true}, {"", map[string]string{"type": "AverageValue"}, true}, {"", map[string]string{"type": "xxx", "value": "50"}, true}, } +var testCPUMemoryMetadataActivationPresent = parseCPUMemoryMetadataTestData{ + metricType: v2.AverageValueMetricType, + metadata: map[string]string{"type": "AverageValue", "value": "50", "activationValue": "40"}, +} + +var testCPUMemoryMetadataActivationNotPresent = parseCPUMemoryMetadataTestData{ + metricType: v2.AverageValueMetricType, + metadata: map[string]string{"type": "AverageValue", "value": "50"}, +} + +var selectLabels = map[string]string{ + "app": "test-deployment", +} + +func createScaledObject() *kedav1alpha1.ScaledObject { + maxReplicas := int32(3) + minReplicas := int32(0) + pollingInterval := int32(10) + return &kedav1alpha1.ScaledObject{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "keda.sh/v1alpha1", + Kind: "ScaledObject", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-name", + Namespace: "test-namespace", + }, + Spec: kedav1alpha1.ScaledObjectSpec{ + MaxReplicaCount: &maxReplicas, + MinReplicaCount: &minReplicas, + PollingInterval: &pollingInterval, + ScaleTargetRef: &kedav1alpha1.ScaleTarget{ + APIVersion: "apps/v1", + Kind: "Deployment", + Name: "test-deployment", + }, + Triggers: []kedav1alpha1.ScaleTriggers{ + { + Type: "cpu", + Metadata: map[string]string{ + "activationValue": "500", + "value": "800", + }, + MetricType: v2.UtilizationMetricType, + }, + }, + }, + Status: kedav1alpha1.ScaledObjectStatus{ + HpaName: "keda-hpa-test-name", + ScaleTargetGVKR: &kedav1alpha1.GroupVersionKindResource{ + Group: "apps", + Version: "v1", + Resource: "deployments", + Kind: "Deployment", + }, + }, + } +} + +func createDeployment() *appsv1.Deployment { + replicas := int32(1) + deployment := &appsv1.Deployment{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "apps/v1", + Kind: "Deployment", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment", + Namespace: "test-namespace", + }, + Spec: appsv1.DeploymentSpec{ + Replicas: &replicas, + Template: v1.PodTemplateSpec{ + ObjectMeta: metav1.ObjectMeta{ + Labels: selectLabels, + }, + }, + Selector: &metav1.LabelSelector{ + MatchLabels: selectLabels, + }, + }, + } + return deployment +} + +func createPod(cpuRequest string) *v1.Pod { + pod := &v1.Pod{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "v1", + Kind: "Pod", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment-1", + Namespace: "test-namespace", + Labels: selectLabels, + }, + Spec: v1.PodSpec{ + Containers: []v1.Container{ + { + Name: "test-container", + Resources: v1.ResourceRequirements{ + Limits: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse("600m"), + }, + Requests: v1.ResourceList{ + v1.ResourceCPU: resource.MustParse(cpuRequest), + }, + }, + }, + }, + }, + Status: v1.PodStatus{ + Phase: v1.PodRunning, + }, + } + + return pod +} + +func createPodMetrics(cpuUsage string) *metrics.PodMetrics { + err := 
metrics.AddToScheme(scheme.Scheme) + if err != nil { + return nil + } + + cpuQuantity, _ := resource.ParseQuantity(cpuUsage) + return &metrics.PodMetrics{ + TypeMeta: metav1.TypeMeta{ + APIVersion: "metrics.k8s.io/v1beta1", + Kind: "PodMetrics", + }, + ObjectMeta: metav1.ObjectMeta{ + Name: "test-deployment-1", + Namespace: "test-namespace", + Labels: selectLabels, + }, + Containers: []metrics.ContainerMetrics{ + { + Name: "test-container", + Usage: v1.ResourceList{ + v1.ResourceCPU: cpuQuantity, + }, + }, + }, + } +} + +type mockPodMetricsesGetter struct { +} + +func (m *mockPodMetricsesGetter) PodMetricses(namespace string) metricsv1beta1.PodMetricsInterface { + return &mockPodMetricsInterface{} +} + +type mockPodMetricsInterface struct { + metricsv1beta1.PodMetricsExpansion +} + +func (m *mockPodMetricsInterface) Get(ctx context.Context, name string, opts metav1.GetOptions) (*metrics.PodMetrics, error) { + return nil, nil +} + +func (m *mockPodMetricsInterface) List(ctx context.Context, opts metav1.ListOptions) (*metrics.PodMetricsList, error) { + return &metrics.PodMetricsList{ + Items: []metrics.PodMetrics{ + *createPodMetrics("500m"), + }, + TypeMeta: metav1.TypeMeta{ + APIVersion: "metrics.k8s.io/v1beta1", + Kind: "PodMetricsList", + }, + }, nil +} + +func (m *mockPodMetricsInterface) Watch(ctx context.Context, opts metav1.ListOptions) (watch.Interface, error) { + return nil, nil +} + +func getMockMetricsClient() metricsv1beta1.PodMetricsesGetter { + return &mockPodMetricsesGetter{} +} + func TestCPUMemoryParseMetadata(t *testing.T) { logger := logr.Discard() for i, testData := range testCPUMemoryMetadata { @@ -49,7 +242,7 @@ func TestCPUMemoryParseMetadata(t *testing.T) { TriggerMetadata: testData.metadata, MetricType: testData.metricType, } - _, err := parseResourceMetadata(config, logger) + _, err := parseResourceMetadata(config, logger, fake.NewFakeClient()) if err != nil && !testData.isError { t.Errorf("Test case %d: Expected success but got error: %v", i, err) } @@ -57,6 +250,34 @@ func TestCPUMemoryParseMetadata(t *testing.T) { t.Errorf("Test case %d: Expected error but got success", i) } } + + // Test activation value present + config := &scalersconfig.ScalerConfig{ + TriggerMetadata: testCPUMemoryMetadataActivationPresent.metadata, + MetricType: testCPUMemoryMetadataActivationPresent.metricType, + } + + metadata, err := parseResourceMetadata(config, logger, fake.NewFakeClient()) + if err != nil { + t.Errorf("Test case activation value present: Expected success but got error: %v", err) + } + if !metadata.ActivationAverageValue.Equal(resource.MustParse("40")) { + t.Errorf("Test case activation value present: Expected activation value 40 but got %v", metadata.ActivationAverageValue) + } + + // Test activation value not present + config = &scalersconfig.ScalerConfig{ + TriggerMetadata: testCPUMemoryMetadataActivationNotPresent.metadata, + MetricType: testCPUMemoryMetadataActivationNotPresent.metricType, + } + + metadata, err = parseResourceMetadata(config, logger, fake.NewFakeClient()) + if err != nil { + t.Errorf("Test case activation value not present: Expected success but got error: %v", err) + } + if !metadata.ActivationAverageValue.Equal(resource.MustParse("0")) { + t.Errorf("Test case activation value not present: Expected activation value 0 but got %v", metadata.ActivationAverageValue) + } } func TestGetMetricSpecForScaling(t *testing.T) { @@ -64,7 +285,9 @@ func TestGetMetricSpecForScaling(t *testing.T) { config := &scalersconfig.ScalerConfig{ TriggerMetadata: 
validCPUMemoryMetadata, } - scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config) + kubeClient := fake.NewFakeClient() + metricsClient := getMockMetricsClient() + scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient, metricsClient) metricSpec := scaler.GetMetricSpecForScaling(context.Background()) assert.Equal(t, metricSpec[0].Type, v2.ResourceMetricSourceType) @@ -76,11 +299,11 @@ func TestGetMetricSpecForScaling(t *testing.T) { TriggerMetadata: map[string]string{"value": "50"}, MetricType: v2.UtilizationMetricType, } - scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config) + scaler, _ = NewCPUMemoryScaler(v1.ResourceMemory, config, kubeClient, metricsClient) metricSpec = scaler.GetMetricSpecForScaling(context.Background()) assert.Equal(t, metricSpec[0].Type, v2.ResourceMetricSourceType) - assert.Equal(t, metricSpec[0].Resource.Name, v1.ResourceCPU) + assert.Equal(t, metricSpec[0].Resource.Name, v1.ResourceMemory) assert.Equal(t, metricSpec[0].Resource.Target.Type, v2.UtilizationMetricType) } @@ -89,7 +312,9 @@ func TestGetContainerMetricSpecForScaling(t *testing.T) { config := &scalersconfig.ScalerConfig{ TriggerMetadata: validContainerCPUMemoryMetadata, } - scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config) + kubeClient := fake.NewFakeClient() + metricsClient := getMockMetricsClient() + scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient, metricsClient) metricSpec := scaler.GetMetricSpecForScaling(context.Background()) assert.Equal(t, metricSpec[0].Type, v2.ContainerResourceMetricSourceType) @@ -102,7 +327,7 @@ func TestGetContainerMetricSpecForScaling(t *testing.T) { TriggerMetadata: map[string]string{"value": "50", "containerName": "bar"}, MetricType: v2.UtilizationMetricType, } - scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config) + scaler, _ = NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient, metricsClient) metricSpec = scaler.GetMetricSpecForScaling(context.Background()) assert.Equal(t, metricSpec[0].Type, v2.ContainerResourceMetricSourceType) @@ -110,3 +335,53 @@ func TestGetContainerMetricSpecForScaling(t *testing.T) { assert.Equal(t, metricSpec[0].ContainerResource.Target.Type, v2.UtilizationMetricType) assert.Equal(t, metricSpec[0].ContainerResource.Container, "bar") } + +func TestGetMetricsAndActivity_IsActive(t *testing.T) { + config := &scalersconfig.ScalerConfig{ + TriggerMetadata: validCPUMemoryMetadata, + ScalableObjectType: "ScaledObject", + ScalableObjectName: "test-name", + ScalableObjectNamespace: "test-namespace", + } + + deployment := createDeployment() + pod := createPod("400m") + + err := kedav1alpha1.AddToScheme(scheme.Scheme) + if err != nil { + t.Errorf("Error adding to scheme: %s", err) + return + } + + kubeClient := fake.NewClientBuilder().WithObjects(deployment, pod, createPodMetrics("500m"), createScaledObject()).WithScheme(scheme.Scheme).Build() + metricsClient := getMockMetricsClient() + scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient, metricsClient) + + _, isActive, _ := scaler.GetMetricsAndActivity(context.Background(), "cpu") + assert.Equal(t, true, isActive) +} + +func TestGetMetricsAndActivity_IsNotActive(t *testing.T) { + config := &scalersconfig.ScalerConfig{ + TriggerMetadata: validCPUMemoryMetadata, + ScalableObjectType: "ScaledObject", + ScalableObjectName: "test-name", + ScalableObjectNamespace: "test-namespace", + } + + deployment := createDeployment() + pod := createPod("2") + + err := kedav1alpha1.AddToScheme(scheme.Scheme) + if err != nil { + t.Errorf("Error adding to scheme: 
%s", err) + return + } + + kubeClient := fake.NewClientBuilder().WithObjects(deployment, pod, createScaledObject()).WithScheme(scheme.Scheme).Build() + metricsClient := getMockMetricsClient() + scaler, _ := NewCPUMemoryScaler(v1.ResourceCPU, config, kubeClient, metricsClient) + + _, isActive, _ := scaler.GetMetricsAndActivity(context.Background(), "cpu") + assert.Equal(t, isActive, false) +} diff --git a/pkg/scaling/scale_handler.go b/pkg/scaling/scale_handler.go index 5a955e48e66..405049865f2 100644 --- a/pkg/scaling/scale_handler.go +++ b/pkg/scaling/scale_handler.go @@ -18,6 +18,7 @@ package scaling import ( "context" + "errors" "fmt" "strconv" "strings" @@ -33,6 +34,7 @@ import ( "k8s.io/client-go/scale" "k8s.io/client-go/tools/record" "k8s.io/metrics/pkg/apis/external_metrics" + metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" logf "sigs.k8s.io/controller-runtime/pkg/log" @@ -66,6 +68,7 @@ type ScaleHandler interface { type scaleHandler struct { client client.Client + podMetricsClient metricsv1beta1.PodMetricsesGetter scaleLoopContexts *sync.Map scaleExecutor executor.ScaleExecutor globalHTTPTimeout time.Duration @@ -77,9 +80,10 @@ type scaleHandler struct { } // NewScaleHandler creates a ScaleHandler object -func NewScaleHandler(client client.Client, scaleClient scale.ScalesGetter, reconcilerScheme *runtime.Scheme, globalHTTPTimeout time.Duration, recorder record.EventRecorder, secretsLister corev1listers.SecretLister) ScaleHandler { +func NewScaleHandler(client client.Client, scaleClient scale.ScalesGetter, podMetricsClient metricsv1beta1.PodMetricsesGetter, reconcilerScheme *runtime.Scheme, globalHTTPTimeout time.Duration, recorder record.EventRecorder, secretsLister corev1listers.SecretLister) ScaleHandler { return &scaleHandler{ client: client, + podMetricsClient: podMetricsClient, scaleLoopContexts: &sync.Map{}, scaleExecutor: executor.NewScaleExecutor(client, scaleClient, reconcilerScheme, recorder), globalHTTPTimeout: globalHTTPTimeout, @@ -748,60 +752,74 @@ func (*scaleHandler) getScalerState(ctx context.Context, scaler scalers.Scaler, } for _, spec := range metricSpecs { - if spec.External == nil { - continue - } + switch { + case spec.Resource != nil: + metricName := spec.Resource.Name.String() + _, isMetricActive, _, err := cache.GetMetricsAndActivityForScaler(ctx, triggerIndex, metricName) + if err != nil { + result.Err = err + logger.Error(err, "error getting metric source", "source", result.TriggerName, "metricName", metricName) + cache.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.KEDAMetricSourceFailed, err.Error()) + continue + } - metricName := spec.External.Metric.Name + metricscollector.RecordScalerError(scaledObject.Namespace, scaledObject.Name, result.TriggerName, triggerIndex, metricName, true, err) + metricscollector.RecordScalerActive(scaledObject.Namespace, scaledObject.Name, result.TriggerName, triggerIndex, metricName, true, isMetricActive) + result.IsActive = isMetricActive + case spec.External != nil: + metricName := spec.External.Metric.Name - var latency time.Duration - metrics, isMetricActive, latency, err := cache.GetMetricsAndActivityForScaler(ctx, triggerIndex, metricName) - metricscollector.RecordScalerError(scaledObject.Namespace, scaledObject.Name, result.TriggerName, triggerIndex, metricName, true, err) - if latency != -1 { - metricscollector.RecordScalerLatency(scaledObject.Namespace, scaledObject.Name, result.TriggerName, triggerIndex, 
metricName, true, latency) - } - result.Metrics = append(result.Metrics, metrics...) - logger.V(1).Info("Getting metrics and activity from scaler", "scaler", result.TriggerName, "metricName", metricName, "metrics", metrics, "activity", isMetricActive, "scalerError", err) - - if scalerConfig.TriggerUseCachedMetrics { - result.Records[metricName] = metricscache.MetricsRecord{ - IsActive: isMetricActive, - Metric: metrics, - ScalerError: err, + var latency time.Duration + metrics, isMetricActive, latency, err := cache.GetMetricsAndActivityForScaler(ctx, triggerIndex, metricName) + metricscollector.RecordScalerError(scaledObject.Namespace, scaledObject.Name, result.TriggerName, triggerIndex, metricName, true, err) + if latency != -1 { + metricscollector.RecordScalerLatency(scaledObject.Namespace, scaledObject.Name, result.TriggerName, triggerIndex, metricName, true, latency) + } + result.Metrics = append(result.Metrics, metrics...) + logger.V(1).Info("Getting metrics and activity from scaler", "scaler", result.TriggerName, "metricName", metricName, "metrics", metrics, "activity", isMetricActive, "scalerError", err) + + if scalerConfig.TriggerUseCachedMetrics { + result.Records[metricName] = metricscache.MetricsRecord{ + IsActive: isMetricActive, + Metric: metrics, + ScalerError: err, + } } - } - if err != nil { - result.Err = err - if scaledObject.IsUsingModifiers() { - logger.Error(err, "error getting metric source", "source", result.TriggerName) - cache.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.KEDAMetricSourceFailed, err.Error()) + if err != nil { + result.Err = err + if scaledObject.IsUsingModifiers() { + logger.Error(err, "error getting metric source", "source", result.TriggerName) + cache.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.KEDAMetricSourceFailed, err.Error()) + } else { + logger.Error(err, "error getting scale decision", "scaler", result.TriggerName) + cache.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error()) + } } else { - logger.Error(err, "error getting scale decision", "scaler", result.TriggerName) - cache.Recorder.Event(scaledObject, corev1.EventTypeWarning, eventreason.KEDAScalerFailed, err.Error()) - } - } else { - result.IsActive = isMetricActive - for _, metric := range metrics { - metricValue := metric.Value.AsApproximateFloat64() - metricscollector.RecordScalerMetric(scaledObject.Namespace, scaledObject.Name, result.TriggerName, triggerIndex, metric.MetricName, true, metricValue) - } - if !scaledObject.IsUsingModifiers() { - if isMetricActive { - if spec.External != nil { - logger.V(1).Info("Scaler for scaledObject is active", "scaler", result.TriggerName, "metricName", metricName) - } - if spec.Resource != nil { - logger.V(1).Info("Scaler for scaledObject is active", "scaler", result.TriggerName, "metricName", spec.Resource.Name) + result.IsActive = isMetricActive + for _, metric := range metrics { + metricValue := metric.Value.AsApproximateFloat64() + metricscollector.RecordScalerMetric(scaledObject.Namespace, scaledObject.Name, result.TriggerName, triggerIndex, metric.MetricName, true, metricValue) + } + if !scaledObject.IsUsingModifiers() { + if isMetricActive { + if spec.External != nil { + logger.V(1).Info("Scaler for scaledObject is active", "scaler", result.TriggerName, "metricName", metricName) + } + if spec.Resource != nil { + logger.V(1).Info("Scaler for scaledObject is active", "scaler", result.TriggerName, "metricName", spec.Resource.Name) + } } + 
metricscollector.RecordScalerActive(scaledObject.Namespace, scaledObject.Name, result.TriggerName, triggerIndex, metricName, true, isMetricActive) } - metricscollector.RecordScalerActive(scaledObject.Namespace, scaledObject.Name, result.TriggerName, triggerIndex, metricName, true, isMetricActive) } - } - result.Pairs, err = modifiers.GetPairTriggerAndMetric(scaledObject, metricName, scalerConfig.TriggerName) - if err != nil { - logger.Error(err, "error pairing triggers & metrics for compositeScaler") + result.Pairs, err = modifiers.GetPairTriggerAndMetric(scaledObject, metricName, scalerConfig.TriggerName) + if err != nil { + logger.Error(err, "error pairing triggers & metrics for compositeScaler") + } + default: + logger.Error(errors.New("error parsing metric for the scaler"), "both resource and external metrics are nil", "scaler", result.TriggerName) } } return result diff --git a/pkg/scaling/scalers_builder.go b/pkg/scaling/scalers_builder.go index 80bfb40658f..7f8509210ed 100644 --- a/pkg/scaling/scalers_builder.go +++ b/pkg/scaling/scalers_builder.go @@ -21,6 +21,7 @@ import ( "fmt" corev1 "k8s.io/api/core/v1" + metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" "sigs.k8s.io/controller-runtime/pkg/client" kedav1alpha1 "github.com/kedacore/keda/v2/apis/keda/v1alpha1" @@ -82,7 +83,7 @@ func (h *scaleHandler) buildScalers(ctx context.Context, withTriggers *kedav1alp } config.AuthParams = authParams config.PodIdentity = podIdentity - scaler, err := buildScaler(ctx, h.client, trigger.Type, config) + scaler, err := buildScaler(ctx, h.client, h.podMetricsClient, trigger.Type, config) return scaler, config, err } @@ -113,7 +114,7 @@ func (h *scaleHandler) buildScalers(ctx context.Context, withTriggers *kedav1alp } // buildScaler builds a scaler form input config and trigger type -func buildScaler(ctx context.Context, client client.Client, triggerType string, config *scalersconfig.ScalerConfig) (scalers.Scaler, error) { +func buildScaler(ctx context.Context, client client.Client, metricsClient metricsv1beta1.PodMetricsesGetter, triggerType string, config *scalersconfig.ScalerConfig) (scalers.Scaler, error) { // TRIGGERS-START switch triggerType { case "activemq": @@ -159,7 +160,7 @@ func buildScaler(ctx context.Context, client client.Client, triggerType string, case "couchdb": return scalers.NewCouchDBScaler(ctx, config) case "cpu": - return scalers.NewCPUMemoryScaler(corev1.ResourceCPU, config) + return scalers.NewCPUMemoryScaler(corev1.ResourceCPU, config, client, metricsClient) case "cron": return scalers.NewCronScaler(config) case "datadog": @@ -204,7 +205,7 @@ func buildScaler(ctx context.Context, client client.Client, triggerType string, case "loki": return scalers.NewLokiScaler(config) case "memory": - return scalers.NewCPUMemoryScaler(corev1.ResourceMemory, config) + return scalers.NewCPUMemoryScaler(corev1.ResourceMemory, config, client, metricsClient) case "metrics-api": return scalers.NewMetricsAPIScaler(config) case "mongodb": diff --git a/tests/scalers/cpu/cpu_test.go b/tests/scalers/cpu/cpu_test.go index f24922dc61d..a7f55bdef86 100644 --- a/tests/scalers/cpu/cpu_test.go +++ b/tests/scalers/cpu/cpu_test.go @@ -135,7 +135,8 @@ spec: - type: cpu metadata: type: Utilization - value: "50" + value: "10" + activationValue: "5" - type: kubernetes-workload metadata: podSelector: 'pod={{.WorkloadDeploymentName}}' @@ -245,9 +246,18 @@ func scaleToZero(t *testing.T, kc *kubernetes.Clientset, data templateData) { assert.True(t, 
WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, maxReplicas, 60, 1), "Replica count should be %v", maxReplicas) - // scale external trigger in (expect replicas back to 0 -- external trigger not active) + // activate cpu trigger + KubectlReplaceWithTemplate(t, data, "triggerJobTemplate", triggerJob) + + // replica count should not change from maxReplicas + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, maxReplicas, 60) + + // scale external trigger in (expect replicas to stay at maxReplicas -- external trigger not active) KubernetesScaleDeployment(t, kc, workloadDeploymentName, int64(minReplicas), testNamespace) + AssertReplicaCountNotChangeDuringTimePeriod(t, kc, deploymentName, testNamespace, maxReplicas, 60) + // remove trigger job to deactivate cpu trigger + KubectlDeleteWithTemplate(t, data, "triggerJobTemplate", triggerJob) assert.True(t, WaitForDeploymentReplicaReadyCount(t, kc, deploymentName, testNamespace, minReplicas, 60, 1), "Replica count should be %v", minReplicas) } diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go new file mode 100644 index 00000000000..9c1153ffc2f --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=metrics.k8s.io + +// Package metrics defines an API for exposing metrics. +package metrics // import "k8s.io/metrics/pkg/apis/metrics" diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/register.go b/vendor/k8s.io/metrics/pkg/apis/metrics/register.go new file mode 100644 index 00000000000..9384e44b912 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/register.go @@ -0,0 +1,55 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package metrics + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "metrics.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: runtime.APIVersionInternal} + +// Kind takes an unqualified kind and returns back a Group qualified GroupKind +func Kind(kind string) schema.GroupKind { + return SchemeGroupVersion.WithKind(kind).GroupKind() +} + +// Resource takes an unqualified resource and returns back a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder is the scheme builder with scheme init functions to run for this API package + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + // AddToScheme is a common registration function for mapping packaged scoped group & version keys to a scheme + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &NodeMetrics{}, + &NodeMetricsList{}, + &PodMetrics{}, + &PodMetricsList{}, + ) + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/types.go b/vendor/k8s.io/metrics/pkg/apis/metrics/types.go new file mode 100644 index 00000000000..f1c58c76840 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/types.go @@ -0,0 +1,101 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package metrics + +import ( + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +resourceName=nodes +// +genclient:readonly +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetrics sets resource usage metrics of a node. +type NodeMetrics struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time + Window metav1.Duration + + // The memory usage is the memory working set. + Usage corev1.ResourceList +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetricsList is a list of NodeMetrics. +type NodeMetricsList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta + + // List of node metrics. + Items []NodeMetrics +} + +// +genclient +// +resourceName=pods +// +genclient:readonly +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetrics sets resource usage metrics of a pod. 
+type PodMetrics struct { + metav1.TypeMeta + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time + Window metav1.Duration + + // Metrics for all containers are collected within the same time window. + Containers []ContainerMetrics +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetricsList is a list of PodMetrics. +type PodMetricsList struct { + metav1.TypeMeta + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta + + // List of pod metrics. + Items []PodMetrics +} + +// ContainerMetrics sets resource usage metrics of a container. +type ContainerMetrics struct { + // Container name corresponding to the one from pod.spec.containers. + Name string + // The memory usage is the memory working set. + Usage corev1.ResourceList +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go new file mode 100644 index 00000000000..8e06b220547 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/metrics/pkg/apis/metrics +// +k8s:openapi-gen=true +// +groupName=metrics.k8s.io + +// Package v1alpha1 is the v1alpha1 version of the metrics API. +package v1alpha1 // import "k8s.io/metrics/pkg/apis/metrics/v1alpha1" diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go new file mode 100644 index 00000000000..c472bacd4b1 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.pb.go @@ -0,0 +1,1758 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto + +package v1alpha1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ContainerMetrics) Reset() { *m = ContainerMetrics{} } +func (*ContainerMetrics) ProtoMessage() {} +func (*ContainerMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_4bcbecebae081ea6, []int{0} +} +func (m *ContainerMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerMetrics.Merge(m, src) +} +func (m *ContainerMetrics) XXX_Size() int { + return m.Size() +} +func (m *ContainerMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerMetrics proto.InternalMessageInfo + +func (m *NodeMetrics) Reset() { *m = NodeMetrics{} } +func (*NodeMetrics) ProtoMessage() {} +func (*NodeMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_4bcbecebae081ea6, []int{1} +} +func (m *NodeMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeMetrics.Merge(m, src) +} +func (m *NodeMetrics) XXX_Size() int { + return m.Size() +} +func (m *NodeMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_NodeMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeMetrics proto.InternalMessageInfo + +func (m *NodeMetricsList) Reset() { *m = NodeMetricsList{} } +func (*NodeMetricsList) ProtoMessage() {} +func (*NodeMetricsList) Descriptor() ([]byte, []int) { + return fileDescriptor_4bcbecebae081ea6, []int{2} +} +func (m *NodeMetricsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeMetricsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeMetricsList.Merge(m, src) +} +func (m *NodeMetricsList) XXX_Size() int { + return m.Size() +} +func (m *NodeMetricsList) XXX_DiscardUnknown() { + xxx_messageInfo_NodeMetricsList.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeMetricsList proto.InternalMessageInfo + +func (m *PodMetrics) Reset() { *m = 
PodMetrics{} } +func (*PodMetrics) ProtoMessage() {} +func (*PodMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_4bcbecebae081ea6, []int{3} +} +func (m *PodMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodMetrics.Merge(m, src) +} +func (m *PodMetrics) XXX_Size() int { + return m.Size() +} +func (m *PodMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_PodMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_PodMetrics proto.InternalMessageInfo + +func (m *PodMetricsList) Reset() { *m = PodMetricsList{} } +func (*PodMetricsList) ProtoMessage() {} +func (*PodMetricsList) Descriptor() ([]byte, []int) { + return fileDescriptor_4bcbecebae081ea6, []int{4} +} +func (m *PodMetricsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodMetricsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodMetricsList.Merge(m, src) +} +func (m *PodMetricsList) XXX_Size() int { + return m.Size() +} +func (m *PodMetricsList) XXX_DiscardUnknown() { + xxx_messageInfo_PodMetricsList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodMetricsList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ContainerMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.ContainerMetrics") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.ContainerMetrics.UsageEntry") + proto.RegisterType((*NodeMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.NodeMetrics") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.NodeMetrics.UsageEntry") + proto.RegisterType((*NodeMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.NodeMetricsList") + proto.RegisterType((*PodMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.PodMetrics") + proto.RegisterType((*PodMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1alpha1.PodMetricsList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto", fileDescriptor_4bcbecebae081ea6) +} + +var fileDescriptor_4bcbecebae081ea6 = []byte{ + // 661 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0x41, 0x4f, 0x13, 0x41, + 0x18, 0xed, 0xd0, 0x96, 0xc0, 0x54, 0x11, 0xf7, 0x44, 0x7a, 0xd8, 0x92, 0x9e, 0x1a, 0x13, 0x66, + 0x85, 0xa0, 0x21, 0x9c, 0xcc, 0x0a, 0x07, 0x13, 0x41, 0xd9, 0xa0, 0x46, 0xf4, 0xe0, 0x74, 0x3b, + 0x6e, 0xc7, 0xb2, 0x3b, 0x9b, 0x99, 0xd9, 0x92, 0xde, 0x8c, 0x7a, 0xf2, 0x64, 0xe2, 0x9f, 0xc2, + 0x78, 0xe1, 0xc8, 0x45, 0x90, 0xf5, 0xee, 0x0f, 0xf0, 0x64, 0x76, 0x76, 0xb6, 0x5b, 0x29, 0xc2, + 0xca, 0xc1, 0x13, 0xb7, 0xee, 0x37, 0xf3, 0xde, 0xfb, 0xe6, 0x7d, 0x6f, 0x26, 0x85, 0x5b, 0xbd, + 0x15, 0x81, 0x28, 0xb3, 0x7a, 0x51, 0x9b, 0xf0, 0x80, 0x48, 0x22, 0xac, 0x3e, 0x09, 0x3a, 0x8c, + 0x5b, 0x7a, 0xc1, 0x27, 0x92, 0x53, 0x57, 0x58, 0x61, 0xcf, 0xb3, 0x70, 0x48, 0xc5, 0xb0, 0xd0, + 0x5f, 0xc4, 0xbb, 0x61, 0x17, 0x2f, 0x5a, 0x1e, 0x09, 0x08, 0xc7, 0x92, 0x74, 0x50, 0xc8, 0x99, + 0x64, 0x46, 0x2b, 
0x45, 0x22, 0xbd, 0x11, 0x85, 0x3d, 0x0f, 0x25, 0xc8, 0x61, 0x21, 0x43, 0xd6, + 0x17, 0x3c, 0x2a, 0xbb, 0x51, 0x1b, 0xb9, 0xcc, 0xb7, 0x3c, 0xe6, 0x31, 0x4b, 0x11, 0xb4, 0xa3, + 0xd7, 0xea, 0x4b, 0x7d, 0xa8, 0x5f, 0x29, 0x71, 0xbd, 0xa9, 0x5b, 0xc2, 0x21, 0xb5, 0x5c, 0xc6, + 0x89, 0xd5, 0x1f, 0x13, 0xaf, 0x2f, 0xe7, 0x7b, 0x7c, 0xec, 0x76, 0x69, 0x40, 0xf8, 0x20, 0xeb, + 0xdd, 0xe2, 0x44, 0xb0, 0x88, 0xbb, 0xe4, 0x9f, 0x50, 0xea, 0xc4, 0xf8, 0x2c, 0x2d, 0xeb, 0x6f, + 0x28, 0x1e, 0x05, 0x92, 0xfa, 0xe3, 0x32, 0x77, 0x2f, 0x02, 0x08, 0xb7, 0x4b, 0x7c, 0x7c, 0x1a, + 0xd7, 0x7c, 0x5f, 0x86, 0xb3, 0xf7, 0x59, 0x20, 0x71, 0x82, 0xd8, 0x48, 0x5d, 0x34, 0xe6, 0x61, + 0x25, 0xc0, 0x3e, 0x99, 0x03, 0xf3, 0xa0, 0x35, 0x6d, 0x5f, 0xdb, 0x3f, 0x6a, 0x94, 0xe2, 0xa3, + 0x46, 0x65, 0x13, 0xfb, 0xc4, 0x51, 0x2b, 0x46, 0x0c, 0x60, 0x35, 0x12, 0xd8, 0x23, 0x73, 0x13, + 0xf3, 0xe5, 0x56, 0x6d, 0x69, 0x1d, 0x15, 0x9d, 0x0c, 0x3a, 0xad, 0x86, 0x9e, 0x24, 0x3c, 0xeb, + 0x81, 0xe4, 0x03, 0xfb, 0x03, 0xd0, 0x5a, 0x55, 0x55, 0xfc, 0x75, 0xd4, 0x68, 0x8c, 0x0f, 0x06, + 0x39, 0xda, 0xeb, 0x87, 0x54, 0xc8, 0x77, 0xc7, 0xe7, 0x6e, 0x49, 0x5a, 0xfe, 0x78, 0xdc, 0x58, + 0x28, 0x32, 0x3a, 0xb4, 0x15, 0xe1, 0x40, 0x52, 0x39, 0x70, 0xd2, 0xa3, 0xd5, 0xbb, 0x10, 0xe6, + 0xbd, 0x19, 0xb3, 0xb0, 0xdc, 0x23, 0x83, 0xd4, 0x13, 0x27, 0xf9, 0x69, 0xac, 0xc1, 0x6a, 0x1f, + 0xef, 0x46, 0x89, 0x07, 0xa0, 0x55, 0x5b, 0x42, 0x99, 0x07, 0xa3, 0x2a, 0x99, 0x11, 0xe8, 0x0c, + 0x15, 0x05, 0x5e, 0x9d, 0x58, 0x01, 0xcd, 0x9f, 0x15, 0x58, 0xdb, 0x64, 0x1d, 0x92, 0x0d, 0xe0, + 0x15, 0x9c, 0x4a, 0x92, 0xd1, 0xc1, 0x12, 0x2b, 0xc1, 0xda, 0xd2, 0xed, 0xf3, 0xc8, 0x95, 0xcb, + 0x18, 0xf5, 0x17, 0xd1, 0xa3, 0xf6, 0x1b, 0xe2, 0xca, 0x0d, 0x22, 0xb1, 0x6d, 0x68, 0x2b, 0x61, + 0x5e, 0x73, 0x86, 0xac, 0xc6, 0x0b, 0x38, 0x9d, 0xc4, 0x42, 0x48, 0xec, 0x87, 0xba, 0xff, 0x5b, + 0xc5, 0x24, 0xb6, 0xa9, 0x4f, 0xec, 0x9b, 0x9a, 0x7c, 0x7a, 0x3b, 0x23, 0x71, 0x72, 0x3e, 0xe3, + 0x29, 0x9c, 0xdc, 0xa3, 0x41, 0x87, 0xed, 0xcd, 0x95, 0x2f, 0x76, 0x26, 0x67, 0x5e, 0x8b, 0x38, + 0x96, 0x94, 0x05, 0xf6, 0x8c, 0x66, 0x9f, 0x7c, 0xa6, 0x58, 0x1c, 0xcd, 0x66, 0x7c, 0x1b, 0xa6, + 0xae, 0xa2, 0x52, 0x77, 0xaf, 0x78, 0xea, 0x46, 0xdc, 0xbd, 0x0a, 0x1c, 0x68, 0x7e, 0x05, 0xf0, + 0xc6, 0x88, 0x25, 0xc9, 0xc1, 0x8c, 0x97, 0x63, 0xa1, 0x2b, 0x38, 0xb7, 0x04, 0xad, 0x22, 0x37, + 0xab, 0xcd, 0x9c, 0xca, 0x2a, 0x23, 0x81, 0xdb, 0x81, 0x55, 0x2a, 0x89, 0x2f, 0xf4, 0x83, 0x71, + 0xe7, 0x52, 0xa3, 0xb3, 0xaf, 0x67, 0xe3, 0x7a, 0x90, 0x70, 0x39, 0x29, 0x65, 0xf3, 0x73, 0x19, + 0xc2, 0xc7, 0xac, 0x73, 0x75, 0x7b, 0xce, 0xbd, 0x3d, 0x01, 0x84, 0x6e, 0xf6, 0xf6, 0x0a, 0x7d, + 0x83, 0x56, 0x2f, 0xff, 0x6e, 0xe7, 0x16, 0x0d, 0x57, 0x84, 0x33, 0xa2, 0xd0, 0xfc, 0x02, 0xe0, + 0x4c, 0x3e, 0x95, 0xff, 0x10, 0xb1, 0xe7, 0x7f, 0x46, 0x6c, 0xb9, 0xf8, 0xd9, 0xf2, 0x36, 0xcf, + 0x4e, 0x98, 0xbd, 0xb9, 0x7f, 0x62, 0x96, 0x0e, 0x4e, 0xcc, 0xd2, 0xe1, 0x89, 0x59, 0x7a, 0x1b, + 0x9b, 0x60, 0x3f, 0x36, 0xc1, 0x41, 0x6c, 0x82, 0xc3, 0xd8, 0x04, 0xdf, 0x63, 0x13, 0x7c, 0xfa, + 0x61, 0x96, 0x76, 0x5a, 0x45, 0xff, 0xd8, 0xfc, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x8c, 0xe2, 0xb1, + 0xf3, 0x1c, 0x09, 0x00, 0x00, +} + +func (m *ContainerMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerMetrics) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Usage) > 0 { + keysForUsage := make([]string, 0, len(m.Usage)) + for k := range m.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { + v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsage[iNdEx]) + copy(dAtA[i:], keysForUsage[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Usage) > 0 { + keysForUsage := make([]string, 0, len(m.Usage)) + for k := range m.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { + v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsage[iNdEx]) + copy(dAtA[i:], keysForUsage[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeMetricsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeMetricsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodMetricsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodMetricsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ContainerMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Usage) > 0 { + for k, v := range m.Usage { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l 
+ sovGenerated(uint64(l)) + l = m.Timestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Window.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Usage) > 0 { + for k, v := range m.Usage { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeMetricsList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Timestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Window.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodMetricsList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ContainerMetrics) String() string { + if this == nil { + return "nil" + } + keysForUsage := make([]string, 0, len(this.Usage)) + for k := range this.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForUsage { + mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForUsage += "}" + s := strings.Join([]string{`&ContainerMetrics{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Usage:` + mapStringForUsage + `,`, + `}`, + }, "") + return s +} +func (this *NodeMetrics) String() string { + if this == nil { + return "nil" + } + keysForUsage := make([]string, 0, len(this.Usage)) + for k := range this.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForUsage { + mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForUsage += "}" + s := strings.Join([]string{`&NodeMetrics{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, + `Usage:` + mapStringForUsage + `,`, + `}`, + }, "") + return s +} +func (this *NodeMetricsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]NodeMetrics{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), 
"NodeMetrics", "NodeMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NodeMetricsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodMetrics) String() string { + if this == nil { + return "nil" + } + repeatedStringForContainers := "[]ContainerMetrics{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ContainerMetrics", "ContainerMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + s := strings.Join([]string{`&PodMetrics{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, + `Containers:` + repeatedStringForContainers + `,`, + `}`, + }, "") + return s +} +func (this *PodMetricsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodMetrics{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodMetrics", "PodMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodMetricsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeMetrics: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeMetricsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeMetricsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NodeMetrics{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, ContainerMetrics{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodMetricsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodMetricsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodMetrics{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for 
iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto new file mode 100644 index 00000000000..d1938a85c48 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/generated.proto @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.metrics.pkg.apis.metrics.v1alpha1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/metrics/pkg/apis/metrics/v1alpha1"; + +// ContainerMetrics sets resource usage metrics of a container. +message ContainerMetrics { + // Container name corresponding to the one from pod.spec.containers. + optional string name = 1; + + // The memory usage is the memory working set. + map usage = 2; +} + +// NodeMetrics sets resource usage metrics of a node. +message NodeMetrics { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + + // The memory usage is the memory working set. + map usage = 4; +} + +// NodeMetricsList is a list of NodeMetrics. +message NodeMetricsList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of node metrics. + repeated NodeMetrics items = 2; +} + +// PodMetrics sets resource usage metrics of a pod. +message PodMetrics { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + + // Metrics for all containers are collected within the same time window. + repeated ContainerMetrics containers = 4; +} + +// PodMetricsList is a list of PodMetrics. +message PodMetricsList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of pod metrics. + repeated PodMetrics items = 2; +} + diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/register.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/register.go new file mode 100644 index 00000000000..3e5359a8ee3 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "metrics.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1alpha1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder points to a list of functions added to Scheme. 
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &NodeMetrics{}, + &NodeMetricsList{}, + &PodMetrics{}, + &PodMetricsList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/types.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/types.go new file mode 100644 index 00000000000..871a3b1777b --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/types.go @@ -0,0 +1,101 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +resourceName=nodes +// +genclient:readonly +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetrics sets resource usage metrics of a node. +type NodeMetrics struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` + Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` + + // The memory usage is the memory working set. + Usage v1.ResourceList `json:"usage" protobuf:"bytes,4,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetricsList is a list of NodeMetrics. +type NodeMetricsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of node metrics. + Items []NodeMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +resourceName=pods +// +genclient:readonly +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetrics sets resource usage metrics of a pod. +type PodMetrics struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` + Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` + + // Metrics for all containers are collected within the same time window. + Containers []ContainerMetrics `json:"containers" protobuf:"bytes,4,rep,name=containers"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetricsList is a list of PodMetrics. +type PodMetricsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of pod metrics. + Items []PodMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ContainerMetrics sets resource usage metrics of a container. +type ContainerMetrics struct { + // Container name corresponding to the one from pod.spec.containers. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The memory usage is the memory working set. + Usage v1.ResourceList `json:"usage" protobuf:"bytes,2,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go new file mode 100644 index 00000000000..f29d646594f --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.conversion.go @@ -0,0 +1,209 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/api/core/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + metrics "k8s.io/metrics/pkg/apis/metrics" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*ContainerMetrics)(nil), (*metrics.ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics(a.(*ContainerMetrics), b.(*metrics.ContainerMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.ContainerMetrics)(nil), (*ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics(a.(*metrics.ContainerMetrics), b.(*ContainerMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NodeMetrics)(nil), (*metrics.NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics(a.(*NodeMetrics), b.(*metrics.NodeMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.NodeMetrics)(nil), (*NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics(a.(*metrics.NodeMetrics), b.(*NodeMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NodeMetricsList)(nil), (*metrics.NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList(a.(*NodeMetricsList), b.(*metrics.NodeMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.NodeMetricsList)(nil), (*NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList(a.(*metrics.NodeMetricsList), b.(*NodeMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PodMetrics)(nil), (*metrics.PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_PodMetrics_To_metrics_PodMetrics(a.(*PodMetrics), b.(*metrics.PodMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.PodMetrics)(nil), (*PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_PodMetrics_To_v1alpha1_PodMetrics(a.(*metrics.PodMetrics), b.(*PodMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PodMetricsList)(nil), (*metrics.PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList(a.(*PodMetricsList), b.(*metrics.PodMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.PodMetricsList)(nil), (*PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList(a.(*metrics.PodMetricsList), b.(*PodMetricsList), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { + out.Name = in.Name + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics is an autogenerated conversion function. 
+func Convert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { + return autoConvert_v1alpha1_ContainerMetrics_To_metrics_ContainerMetrics(in, out, s) +} + +func autoConvert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { + out.Name = in.Name + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics is an autogenerated conversion function. +func Convert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { + return autoConvert_metrics_ContainerMetrics_To_v1alpha1_ContainerMetrics(in, out, s) +} + +func autoConvert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics is an autogenerated conversion function. +func Convert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { + return autoConvert_v1alpha1_NodeMetrics_To_metrics_NodeMetrics(in, out, s) +} + +func autoConvert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics is an autogenerated conversion function. +func Convert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { + return autoConvert_metrics_NodeMetrics_To_v1alpha1_NodeMetrics(in, out, s) +} + +func autoConvert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]metrics.NodeMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList is an autogenerated conversion function. +func Convert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { + return autoConvert_v1alpha1_NodeMetricsList_To_metrics_NodeMetricsList(in, out, s) +} + +func autoConvert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NodeMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList is an autogenerated conversion function. 
+func Convert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { + return autoConvert_metrics_NodeMetricsList_To_v1alpha1_NodeMetricsList(in, out, s) +} + +func autoConvert_v1alpha1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Containers = *(*[]metrics.ContainerMetrics)(unsafe.Pointer(&in.Containers)) + return nil +} + +// Convert_v1alpha1_PodMetrics_To_metrics_PodMetrics is an autogenerated conversion function. +func Convert_v1alpha1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { + return autoConvert_v1alpha1_PodMetrics_To_metrics_PodMetrics(in, out, s) +} + +func autoConvert_metrics_PodMetrics_To_v1alpha1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Containers = *(*[]ContainerMetrics)(unsafe.Pointer(&in.Containers)) + return nil +} + +// Convert_metrics_PodMetrics_To_v1alpha1_PodMetrics is an autogenerated conversion function. +func Convert_metrics_PodMetrics_To_v1alpha1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { + return autoConvert_metrics_PodMetrics_To_v1alpha1_PodMetrics(in, out, s) +} + +func autoConvert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]metrics.PodMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList is an autogenerated conversion function. +func Convert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { + return autoConvert_v1alpha1_PodMetricsList_To_metrics_PodMetricsList(in, out, s) +} + +func autoConvert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]PodMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList is an autogenerated conversion function. +func Convert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { + return autoConvert_metrics_PodMetricsList_To_v1alpha1_PodMetricsList(in, out, s) +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..9cd8619eccb --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1alpha1/zz_generated.deepcopy.go @@ -0,0 +1,186 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { + *out = *in + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerMetrics. +func (in *ContainerMetrics) DeepCopy() *ContainerMetrics { + if in == nil { + return nil + } + out := new(ContainerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeMetrics) DeepCopyInto(out *NodeMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetrics. +func (in *NodeMetrics) DeepCopy() *NodeMetrics { + if in == nil { + return nil + } + out := new(NodeMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeMetricsList) DeepCopyInto(out *NodeMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetricsList. +func (in *NodeMetricsList) DeepCopy() *NodeMetricsList { + if in == nil { + return nil + } + out := new(NodeMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetrics) DeepCopyInto(out *PodMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ContainerMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetrics. 
+func (in *PodMetrics) DeepCopy() *PodMetrics { + if in == nil { + return nil + } + out := new(PodMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetricsList) DeepCopyInto(out *PodMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsList. +func (in *PodMetricsList) DeepCopy() *PodMetricsList { + if in == nil { + return nil + } + out := new(PodMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go new file mode 100644 index 00000000000..10f5ab9fa5f --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +k8s:protobuf-gen=package +// +k8s:conversion-gen=k8s.io/metrics/pkg/apis/metrics +// +k8s:openapi-gen=true +// +groupName=metrics.k8s.io + +// Package v1beta1 is the v1beta1 version of the metrics API. +package v1beta1 // import "k8s.io/metrics/pkg/apis/metrics/v1beta1" diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go new file mode 100644 index 00000000000..20f558b89c6 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.pb.go @@ -0,0 +1,1758 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto + +package v1beta1 + +import ( + fmt "fmt" + + io "io" + + proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" + + k8s_io_api_core_v1 "k8s.io/api/core/v1" + k8s_io_apimachinery_pkg_api_resource "k8s.io/apimachinery/pkg/api/resource" + resource "k8s.io/apimachinery/pkg/api/resource" + + math "math" + math_bits "math/bits" + reflect "reflect" + strings "strings" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package + +func (m *ContainerMetrics) Reset() { *m = ContainerMetrics{} } +func (*ContainerMetrics) ProtoMessage() {} +func (*ContainerMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_3e7a045767f4b09f, []int{0} +} +func (m *ContainerMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ContainerMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ContainerMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_ContainerMetrics.Merge(m, src) +} +func (m *ContainerMetrics) XXX_Size() int { + return m.Size() +} +func (m *ContainerMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_ContainerMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_ContainerMetrics proto.InternalMessageInfo + +func (m *NodeMetrics) Reset() { *m = NodeMetrics{} } +func (*NodeMetrics) ProtoMessage() {} +func (*NodeMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_3e7a045767f4b09f, []int{1} +} +func (m *NodeMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeMetrics.Merge(m, src) +} +func (m *NodeMetrics) XXX_Size() int { + return m.Size() +} +func (m *NodeMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_NodeMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeMetrics proto.InternalMessageInfo + +func (m *NodeMetricsList) Reset() { *m = NodeMetricsList{} } +func (*NodeMetricsList) ProtoMessage() {} +func (*NodeMetricsList) Descriptor() ([]byte, []int) { + return fileDescriptor_3e7a045767f4b09f, []int{2} +} +func (m *NodeMetricsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *NodeMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *NodeMetricsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_NodeMetricsList.Merge(m, src) +} +func (m *NodeMetricsList) XXX_Size() int { + return m.Size() +} +func (m *NodeMetricsList) XXX_DiscardUnknown() { + xxx_messageInfo_NodeMetricsList.DiscardUnknown(m) +} + +var xxx_messageInfo_NodeMetricsList proto.InternalMessageInfo + +func (m *PodMetrics) Reset() { *m = 
PodMetrics{} } +func (*PodMetrics) ProtoMessage() {} +func (*PodMetrics) Descriptor() ([]byte, []int) { + return fileDescriptor_3e7a045767f4b09f, []int{3} +} +func (m *PodMetrics) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodMetrics) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodMetrics) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodMetrics.Merge(m, src) +} +func (m *PodMetrics) XXX_Size() int { + return m.Size() +} +func (m *PodMetrics) XXX_DiscardUnknown() { + xxx_messageInfo_PodMetrics.DiscardUnknown(m) +} + +var xxx_messageInfo_PodMetrics proto.InternalMessageInfo + +func (m *PodMetricsList) Reset() { *m = PodMetricsList{} } +func (*PodMetricsList) ProtoMessage() {} +func (*PodMetricsList) Descriptor() ([]byte, []int) { + return fileDescriptor_3e7a045767f4b09f, []int{4} +} +func (m *PodMetricsList) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *PodMetricsList) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *PodMetricsList) XXX_Merge(src proto.Message) { + xxx_messageInfo_PodMetricsList.Merge(m, src) +} +func (m *PodMetricsList) XXX_Size() int { + return m.Size() +} +func (m *PodMetricsList) XXX_DiscardUnknown() { + xxx_messageInfo_PodMetricsList.DiscardUnknown(m) +} + +var xxx_messageInfo_PodMetricsList proto.InternalMessageInfo + +func init() { + proto.RegisterType((*ContainerMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.ContainerMetrics") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.ContainerMetrics.UsageEntry") + proto.RegisterType((*NodeMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.NodeMetrics") + proto.RegisterMapType((k8s_io_api_core_v1.ResourceList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.NodeMetrics.UsageEntry") + proto.RegisterType((*NodeMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.NodeMetricsList") + proto.RegisterType((*PodMetrics)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.PodMetrics") + proto.RegisterType((*PodMetricsList)(nil), "k8s.io.metrics.pkg.apis.metrics.v1beta1.PodMetricsList") +} + +func init() { + proto.RegisterFile("k8s.io/kubernetes/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto", fileDescriptor_3e7a045767f4b09f) +} + +var fileDescriptor_3e7a045767f4b09f = []byte{ + // 661 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xec, 0x56, 0xbf, 0x6f, 0x13, 0x3f, + 0x1c, 0x8d, 0x9b, 0xa4, 0x6a, 0x9d, 0xef, 0xb7, 0x94, 0x9b, 0xaa, 0x0c, 0x97, 0x2a, 0x4b, 0x2b, + 0xa4, 0xda, 0xb4, 0x54, 0xa8, 0xb0, 0x20, 0x1d, 0x65, 0x40, 0xa2, 0xa5, 0x9c, 0xca, 0x6f, 0x06, + 0x9c, 0x8b, 0xb9, 0x98, 0x70, 0xe7, 0xc8, 0xf6, 0xa5, 0xca, 0x86, 0x2a, 0x26, 0x26, 0xc4, 0x5f, + 0x15, 0x31, 0x75, 0xec, 0x80, 0x5a, 0x12, 0x66, 0xfe, 0x01, 0x26, 0x74, 0x3e, 0x5f, 0x2e, 0x34, + 0xa5, 0x3d, 0x3a, 0x30, 0x75, 0xcb, 0x7d, 0xec, 0xf7, 0xde, 0xc7, 0xef, 0xf3, 0x6c, 0x05, 0xee, + 0xb4, 0x37, 0x24, 0x62, 0x1c, 0xb7, 0xa3, 0x06, 0x15, 0x21, 0x55, 0x54, 0xe2, 0x2e, 0x0d, 0x9b, + 0x5c, 0x60, 0xb3, 0x10, 0x50, 0x25, 0x98, 0x27, 0x71, 0xa7, 0xed, 0x63, 0xd2, 0x61, 0x72, 0x54, + 0xe8, 0xae, 0x36, 0xa8, 0x22, 0xab, 0xd8, 0xa7, 0x21, 0x15, 0x44, 0xd1, 0x26, 0xea, 0x08, 0xae, + 0xb8, 0xb5, 0x94, 0x00, 0x91, 
0xd9, 0x87, 0x3a, 0x6d, 0x1f, 0xc5, 0xc0, 0x51, 0xc1, 0x00, 0xab, + 0x2b, 0x3e, 0x53, 0xad, 0xa8, 0x81, 0x3c, 0x1e, 0x60, 0x9f, 0xfb, 0x1c, 0x6b, 0x7c, 0x23, 0x7a, + 0xa3, 0xbf, 0xf4, 0x87, 0xfe, 0x95, 0xf0, 0x56, 0xeb, 0xa6, 0x21, 0xd2, 0x61, 0xd8, 0xe3, 0x82, + 0xe2, 0xee, 0x84, 0x76, 0x75, 0x3d, 0xdb, 0x13, 0x10, 0xaf, 0xc5, 0x42, 0x2a, 0x7a, 0x69, 0xe7, + 0x58, 0x50, 0xc9, 0x23, 0xe1, 0xd1, 0xbf, 0x42, 0xe9, 0xf3, 0x92, 0xd3, 0xb4, 0xf0, 0x9f, 0x50, + 0x22, 0x0a, 0x15, 0x0b, 0x26, 0x65, 0x6e, 0x9e, 0x07, 0x90, 0x5e, 0x8b, 0x06, 0xe4, 0x24, 0xae, + 0xbe, 0x5f, 0x84, 0xf3, 0x77, 0x79, 0xa8, 0x48, 0x8c, 0xd8, 0x4a, 0x4c, 0xb4, 0x16, 0x61, 0x29, + 0x24, 0x01, 0x5d, 0x00, 0x8b, 0x60, 0x79, 0xd6, 0xf9, 0xaf, 0x7f, 0x54, 0x2b, 0x0c, 0x8f, 0x6a, + 0xa5, 0x6d, 0x12, 0x50, 0x57, 0xaf, 0x58, 0x03, 0x00, 0xcb, 0x91, 0x24, 0x3e, 0x5d, 0x98, 0x5a, + 0x2c, 0x2e, 0x57, 0xd6, 0x36, 0x51, 0xce, 0xc1, 0xa0, 0x93, 0x62, 0xe8, 0x71, 0x4c, 0x73, 0x2f, + 0x54, 0xa2, 0xe7, 0x7c, 0x00, 0x46, 0xaa, 0xac, 0x8b, 0x3f, 0x8f, 0x6a, 0xb5, 0xc9, 0xb9, 0x20, + 0xd7, 0x58, 0xfd, 0x80, 0x49, 0xb5, 0x7f, 0x7c, 0xe6, 0x96, 0xb8, 0xe3, 0x8f, 0xc7, 0xb5, 0x95, + 0x3c, 0x93, 0x43, 0x8f, 0x22, 0x12, 0x2a, 0xa6, 0x7a, 0x6e, 0x72, 0xb2, 0x6a, 0x0b, 0xc2, 0xac, + 0x37, 0x6b, 0x1e, 0x16, 0xdb, 0xb4, 0x97, 0x58, 0xe2, 0xc6, 0x3f, 0xad, 0x4d, 0x58, 0xee, 0x92, + 0x77, 0x51, 0x6c, 0x01, 0x58, 0xae, 0xac, 0xa1, 0xd4, 0x82, 0x71, 0x95, 0xd4, 0x07, 0x74, 0x8a, + 0x8a, 0x06, 0xdf, 0x9e, 0xda, 0x00, 0xf5, 0x1f, 0x25, 0x58, 0xd9, 0xe6, 0x4d, 0x9a, 0xfa, 0xff, + 0x1a, 0xce, 0xc4, 0xc1, 0x68, 0x12, 0x45, 0xb4, 0x60, 0x65, 0xed, 0xfa, 0x59, 0xe4, 0xda, 0x64, + 0x82, 0xba, 0xab, 0xe8, 0x61, 0xe3, 0x2d, 0xf5, 0xd4, 0x16, 0x55, 0xc4, 0xb1, 0x8c, 0x95, 0x30, + 0xab, 0xb9, 0x23, 0x56, 0xeb, 0x25, 0x9c, 0x8d, 0x53, 0x21, 0x15, 0x09, 0x3a, 0xa6, 0xff, 0x6b, + 0xf9, 0x24, 0x76, 0x59, 0x40, 0x9d, 0xab, 0x86, 0x7c, 0x76, 0x37, 0x25, 0x71, 0x33, 0x3e, 0xeb, + 0x09, 0x9c, 0xde, 0x63, 0x61, 0x93, 0xef, 0x2d, 0x14, 0xcf, 0x77, 0x26, 0x63, 0xde, 0x8c, 0x04, + 0x51, 0x8c, 0x87, 0xce, 0x9c, 0x61, 0x9f, 0x7e, 0xaa, 0x59, 0x5c, 0xc3, 0x66, 0x7d, 0x1d, 0x85, + 0xae, 0xa4, 0x43, 0x77, 0x27, 0x77, 0xe8, 0xc6, 0xcc, 0xbd, 0xcc, 0x1b, 0xa8, 0x7f, 0x01, 0xf0, + 0xca, 0x98, 0x25, 0xf1, 0xc1, 0xac, 0x57, 0x13, 0x99, 0xcb, 0x39, 0xb6, 0x18, 0xad, 0x13, 0x37, + 0x6f, 0xcc, 0x9c, 0x49, 0x2b, 0x63, 0x79, 0x7b, 0x0e, 0xcb, 0x4c, 0xd1, 0x40, 0x9a, 0xe7, 0x62, + 0xfd, 0x22, 0x93, 0x73, 0xfe, 0x4f, 0xa7, 0x75, 0x3f, 0xa6, 0x72, 0x13, 0xc6, 0xfa, 0xe7, 0x22, + 0x84, 0x3b, 0xbc, 0x79, 0x79, 0x77, 0xce, 0xbc, 0x3b, 0x01, 0x84, 0x5e, 0xfa, 0xf2, 0x4a, 0x73, + 0x7f, 0x6e, 0x5d, 0xf8, 0xd1, 0xce, 0x1c, 0x1a, 0xad, 0x48, 0x77, 0x4c, 0xa0, 0xde, 0x07, 0x70, + 0x2e, 0x1b, 0xca, 0x3f, 0x08, 0xd8, 0xb3, 0xdf, 0x03, 0x76, 0x23, 0xf7, 0xd1, 0xb2, 0x2e, 0x4f, + 0xcf, 0x97, 0xb3, 0xd5, 0x1f, 0xd8, 0x85, 0x83, 0x81, 0x5d, 0x38, 0x1c, 0xd8, 0x85, 0xf7, 0x43, + 0x1b, 0xf4, 0x87, 0x36, 0x38, 0x18, 0xda, 0xe0, 0x70, 0x68, 0x83, 0x6f, 0x43, 0x1b, 0x7c, 0xfa, + 0x6e, 0x17, 0x5e, 0x2c, 0xe5, 0xfc, 0x47, 0xf3, 0x2b, 0x00, 0x00, 0xff, 0xff, 0x26, 0x83, 0xd3, + 0x6d, 0x14, 0x09, 0x00, 0x00, +} + +func (m *ContainerMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ContainerMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ContainerMetrics) 
MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Usage) > 0 { + keysForUsage := make([]string, 0, len(m.Usage)) + for k := range m.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { + v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsage[iNdEx]) + copy(dAtA[i:], keysForUsage[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x12 + } + } + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGenerated(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Usage) > 0 { + keysForUsage := make([]string, 0, len(m.Usage)) + for k := range m.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + for iNdEx := len(keysForUsage) - 1; iNdEx >= 0; iNdEx-- { + v := m.Usage[k8s_io_api_core_v1.ResourceName(keysForUsage[iNdEx])] + baseI := i + { + size, err := ((*resource.Quantity)(&v)).MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + i -= len(keysForUsage[iNdEx]) + copy(dAtA[i:], keysForUsage[iNdEx]) + i = encodeVarintGenerated(dAtA, i, uint64(len(keysForUsage[iNdEx]))) + i-- + dAtA[i] = 0xa + i = encodeVarintGenerated(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *NodeMetricsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *NodeMetricsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *NodeMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodMetrics) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodMetrics) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodMetrics) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Containers) > 0 { + for iNdEx := len(m.Containers) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Containers[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } + { + size, err := m.Window.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1a + { + size, err := m.Timestamp.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + { + size, err := m.ObjectMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func (m *PodMetricsList) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *PodMetricsList) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *PodMetricsList) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Items) > 0 { + for iNdEx := len(m.Items) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Items[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + } + { + size, err := m.ListMeta.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGenerated(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + return len(dAtA) - i, nil +} + +func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int { + offset -= sovGenerated(v) + base := offset + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return base +} +func (m *ContainerMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Usage) > 0 { + for k, v := range m.Usage { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l 
+ sovGenerated(uint64(l)) + l = m.Timestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Window.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Usage) > 0 { + for k, v := range m.Usage { + _ = k + _ = v + l = ((*resource.Quantity)(&v)).Size() + mapEntrySize := 1 + len(k) + sovGenerated(uint64(len(k))) + 1 + l + sovGenerated(uint64(l)) + n += mapEntrySize + 1 + sovGenerated(uint64(mapEntrySize)) + } + } + return n +} + +func (m *NodeMetricsList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodMetrics) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ObjectMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Timestamp.Size() + n += 1 + l + sovGenerated(uint64(l)) + l = m.Window.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Containers) > 0 { + for _, e := range m.Containers { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func (m *PodMetricsList) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = m.ListMeta.Size() + n += 1 + l + sovGenerated(uint64(l)) + if len(m.Items) > 0 { + for _, e := range m.Items { + l = e.Size() + n += 1 + l + sovGenerated(uint64(l)) + } + } + return n +} + +func sovGenerated(x uint64) (n int) { + return (math_bits.Len64(x|1) + 6) / 7 +} +func sozGenerated(x uint64) (n int) { + return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *ContainerMetrics) String() string { + if this == nil { + return "nil" + } + keysForUsage := make([]string, 0, len(this.Usage)) + for k := range this.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForUsage { + mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForUsage += "}" + s := strings.Join([]string{`&ContainerMetrics{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Usage:` + mapStringForUsage + `,`, + `}`, + }, "") + return s +} +func (this *NodeMetrics) String() string { + if this == nil { + return "nil" + } + keysForUsage := make([]string, 0, len(this.Usage)) + for k := range this.Usage { + keysForUsage = append(keysForUsage, string(k)) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForUsage) + mapStringForUsage := "k8s_io_api_core_v1.ResourceList{" + for _, k := range keysForUsage { + mapStringForUsage += fmt.Sprintf("%v: %v,", k, this.Usage[k8s_io_api_core_v1.ResourceName(k)]) + } + mapStringForUsage += "}" + s := strings.Join([]string{`&NodeMetrics{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, + `Usage:` + mapStringForUsage + `,`, + `}`, + }, "") + return s +} +func (this *NodeMetricsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]NodeMetrics{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), 
"NodeMetrics", "NodeMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&NodeMetricsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func (this *PodMetrics) String() string { + if this == nil { + return "nil" + } + repeatedStringForContainers := "[]ContainerMetrics{" + for _, f := range this.Containers { + repeatedStringForContainers += strings.Replace(strings.Replace(f.String(), "ContainerMetrics", "ContainerMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForContainers += "}" + s := strings.Join([]string{`&PodMetrics{`, + `ObjectMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ObjectMeta), "ObjectMeta", "v1.ObjectMeta", 1), `&`, ``, 1) + `,`, + `Timestamp:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Timestamp), "Time", "v1.Time", 1), `&`, ``, 1) + `,`, + `Window:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.Window), "Duration", "v1.Duration", 1), `&`, ``, 1) + `,`, + `Containers:` + repeatedStringForContainers + `,`, + `}`, + }, "") + return s +} +func (this *PodMetricsList) String() string { + if this == nil { + return "nil" + } + repeatedStringForItems := "[]PodMetrics{" + for _, f := range this.Items { + repeatedStringForItems += strings.Replace(strings.Replace(f.String(), "PodMetrics", "PodMetrics", 1), `&`, ``, 1) + "," + } + repeatedStringForItems += "}" + s := strings.Join([]string{`&PodMetricsList{`, + `ListMeta:` + strings.Replace(strings.Replace(fmt.Sprintf("%v", this.ListMeta), "ListMeta", "v1.ListMeta", 1), `&`, ``, 1) + `,`, + `Items:` + repeatedStringForItems + `,`, + `}`, + }, "") + return s +} +func valueToStringGenerated(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *ContainerMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ContainerMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ContainerMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + 
return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeMetrics: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Usage", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Usage == nil { + m.Usage = make(k8s_io_api_core_v1.ResourceList) + } + var mapkey k8s_io_api_core_v1.ResourceName + mapvalue := &resource.Quantity{} + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + 
} + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthGenerated + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthGenerated + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = k8s_io_api_core_v1.ResourceName(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthGenerated + } + postmsgIndex := iNdEx + mapmsglen + if postmsgIndex < 0 { + return ErrInvalidLengthGenerated + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &resource.Quantity{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Usage[k8s_io_api_core_v1.ResourceName(mapkey)] = ((k8s_io_apimachinery_pkg_api_resource.Quantity)(*mapvalue)) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *NodeMetricsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: NodeMetricsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: NodeMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 
0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, NodeMetrics{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodMetrics) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodMetrics: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodMetrics: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ObjectMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ObjectMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamp", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Timestamp.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Window", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.Window.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Containers", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Containers = append(m.Containers, ContainerMetrics{}) + if err := m.Containers[len(m.Containers)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *PodMetricsList) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: PodMetricsList: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: PodMetricsList: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListMeta", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.ListMeta.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Items", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGenerated + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGenerated + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGenerated + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Items = append(m.Items, PodMetrics{}) + if err := m.Items[len(m.Items)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGenerated(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGenerated + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipGenerated(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + depth := 0 + for 
iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + case 1: + iNdEx += 8 + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowGenerated + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if length < 0 { + return 0, ErrInvalidLengthGenerated + } + iNdEx += length + case 3: + depth++ + case 4: + if depth == 0 { + return 0, ErrUnexpectedEndOfGroupGenerated + } + depth-- + case 5: + iNdEx += 4 + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + if iNdEx < 0 { + return 0, ErrInvalidLengthGenerated + } + if depth == 0 { + return iNdEx, nil + } + } + return 0, io.ErrUnexpectedEOF +} + +var ( + ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow") + ErrUnexpectedEndOfGroupGenerated = fmt.Errorf("proto: unexpected end of group") +) diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto new file mode 100644 index 00000000000..b2c1f6d730c --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/generated.proto @@ -0,0 +1,95 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + + +// This file was autogenerated by go-to-protobuf. Do not edit it manually! + +syntax = "proto2"; + +package k8s.io.metrics.pkg.apis.metrics.v1beta1; + +import "k8s.io/api/core/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/api/resource/generated.proto"; +import "k8s.io/apimachinery/pkg/apis/meta/v1/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/generated.proto"; +import "k8s.io/apimachinery/pkg/runtime/schema/generated.proto"; + +// Package-wide variables from generator "generated". +option go_package = "k8s.io/metrics/pkg/apis/metrics/v1beta1"; + +// ContainerMetrics sets resource usage metrics of a container. +message ContainerMetrics { + // Container name corresponding to the one from pod.spec.containers. + optional string name = 1; + + // The memory usage is the memory working set. + map usage = 2; +} + +// NodeMetrics sets resource usage metrics of a node. +message NodeMetrics { + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + + // The memory usage is the memory working set. + map usage = 4; +} + +// NodeMetricsList is a list of NodeMetrics. +message NodeMetricsList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of node metrics. + repeated NodeMetrics items = 2; +} + +// PodMetrics sets resource usage metrics of a pod. +message PodMetrics { + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + optional k8s.io.apimachinery.pkg.apis.meta.v1.ObjectMeta metadata = 1; + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + optional k8s.io.apimachinery.pkg.apis.meta.v1.Time timestamp = 2; + + optional k8s.io.apimachinery.pkg.apis.meta.v1.Duration window = 3; + + // Metrics for all containers are collected within the same time window. + repeated ContainerMetrics containers = 4; +} + +// PodMetricsList is a list of PodMetrics. +message PodMetricsList { + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + optional k8s.io.apimachinery.pkg.apis.meta.v1.ListMeta metadata = 1; + + // List of pod metrics. + repeated PodMetrics items = 2; +} + diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go new file mode 100644 index 00000000000..205d253c779 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/register.go @@ -0,0 +1,53 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +// GroupName is the group name use in this package +const GroupName = "metrics.k8s.io" + +// SchemeGroupVersion is group version used to register these objects +var SchemeGroupVersion = schema.GroupVersion{Group: GroupName, Version: "v1beta1"} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +var ( + // SchemeBuilder points to a list of functions added to Scheme. 
+ SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + localSchemeBuilder = &SchemeBuilder + // AddToScheme applies all the stored functions to the scheme. + AddToScheme = SchemeBuilder.AddToScheme +) + +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &NodeMetrics{}, + &NodeMetricsList{}, + &PodMetrics{}, + &PodMetricsList{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go new file mode 100644 index 00000000000..530797b5bf6 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/types.go @@ -0,0 +1,101 @@ +/* +Copyright 2017 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// +genclient +// +resourceName=nodes +// +genclient:readonly +// +genclient:nonNamespaced +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetrics sets resource usage metrics of a node. +type NodeMetrics struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` + Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` + + // The memory usage is the memory working set. + Usage v1.ResourceList `json:"usage" protobuf:"bytes,4,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// NodeMetricsList is a list of NodeMetrics. +type NodeMetricsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of node metrics. + Items []NodeMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// +genclient +// +resourceName=pods +// +genclient:readonly +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetrics sets resource usage metrics of a pod. +type PodMetrics struct { + metav1.TypeMeta `json:",inline"` + // Standard object's metadata. 
+ // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata + // +optional + metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // The following fields define time interval from which metrics were + // collected from the interval [Timestamp-Window, Timestamp]. + Timestamp metav1.Time `json:"timestamp" protobuf:"bytes,2,opt,name=timestamp"` + Window metav1.Duration `json:"window" protobuf:"bytes,3,opt,name=window"` + + // Metrics for all containers are collected within the same time window. + Containers []ContainerMetrics `json:"containers" protobuf:"bytes,4,rep,name=containers"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// PodMetricsList is a list of PodMetrics. +type PodMetricsList struct { + metav1.TypeMeta `json:",inline"` + // Standard list metadata. + // More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds + metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"` + + // List of pod metrics. + Items []PodMetrics `json:"items" protobuf:"bytes,2,rep,name=items"` +} + +// ContainerMetrics sets resource usage metrics of a container. +type ContainerMetrics struct { + // Container name corresponding to the one from pod.spec.containers. + Name string `json:"name" protobuf:"bytes,1,opt,name=name"` + // The memory usage is the memory working set. + Usage v1.ResourceList `json:"usage" protobuf:"bytes,2,rep,name=usage,casttype=k8s.io/api/core/v1.ResourceList,castkey=k8s.io/api/core/v1.ResourceName,castvalue=k8s.io/apimachinery/pkg/api/resource.Quantity"` +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go new file mode 100644 index 00000000000..112c4c707d9 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.conversion.go @@ -0,0 +1,209 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. + +package v1beta1 + +import ( + unsafe "unsafe" + + v1 "k8s.io/api/core/v1" + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + metrics "k8s.io/metrics/pkg/apis/metrics" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. 
+func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*ContainerMetrics)(nil), (*metrics.ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(a.(*ContainerMetrics), b.(*metrics.ContainerMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.ContainerMetrics)(nil), (*ContainerMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(a.(*metrics.ContainerMetrics), b.(*ContainerMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NodeMetrics)(nil), (*metrics.NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(a.(*NodeMetrics), b.(*metrics.NodeMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.NodeMetrics)(nil), (*NodeMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(a.(*metrics.NodeMetrics), b.(*NodeMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*NodeMetricsList)(nil), (*metrics.NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(a.(*NodeMetricsList), b.(*metrics.NodeMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.NodeMetricsList)(nil), (*NodeMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(a.(*metrics.NodeMetricsList), b.(*NodeMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PodMetrics)(nil), (*metrics.PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PodMetrics_To_metrics_PodMetrics(a.(*PodMetrics), b.(*metrics.PodMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.PodMetrics)(nil), (*PodMetrics)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_PodMetrics_To_v1beta1_PodMetrics(a.(*metrics.PodMetrics), b.(*PodMetrics), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*PodMetricsList)(nil), (*metrics.PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(a.(*PodMetricsList), b.(*metrics.PodMetricsList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*metrics.PodMetricsList)(nil), (*PodMetricsList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(a.(*metrics.PodMetricsList), b.(*PodMetricsList), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { + out.Name = in.Name + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics is an autogenerated conversion function. 
+func Convert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(in *ContainerMetrics, out *metrics.ContainerMetrics, s conversion.Scope) error { + return autoConvert_v1beta1_ContainerMetrics_To_metrics_ContainerMetrics(in, out, s) +} + +func autoConvert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { + out.Name = in.Name + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics is an autogenerated conversion function. +func Convert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(in *metrics.ContainerMetrics, out *ContainerMetrics, s conversion.Scope) error { + return autoConvert_metrics_ContainerMetrics_To_v1beta1_ContainerMetrics(in, out, s) +} + +func autoConvert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_v1beta1_NodeMetrics_To_metrics_NodeMetrics is an autogenerated conversion function. +func Convert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(in *NodeMetrics, out *metrics.NodeMetrics, s conversion.Scope) error { + return autoConvert_v1beta1_NodeMetrics_To_metrics_NodeMetrics(in, out, s) +} + +func autoConvert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Usage = *(*v1.ResourceList)(unsafe.Pointer(&in.Usage)) + return nil +} + +// Convert_metrics_NodeMetrics_To_v1beta1_NodeMetrics is an autogenerated conversion function. +func Convert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(in *metrics.NodeMetrics, out *NodeMetrics, s conversion.Scope) error { + return autoConvert_metrics_NodeMetrics_To_v1beta1_NodeMetrics(in, out, s) +} + +func autoConvert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]metrics.NodeMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList is an autogenerated conversion function. +func Convert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(in *NodeMetricsList, out *metrics.NodeMetricsList, s conversion.Scope) error { + return autoConvert_v1beta1_NodeMetricsList_To_metrics_NodeMetricsList(in, out, s) +} + +func autoConvert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]NodeMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList is an autogenerated conversion function. 
+func Convert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(in *metrics.NodeMetricsList, out *NodeMetricsList, s conversion.Scope) error { + return autoConvert_metrics_NodeMetricsList_To_v1beta1_NodeMetricsList(in, out, s) +} + +func autoConvert_v1beta1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Containers = *(*[]metrics.ContainerMetrics)(unsafe.Pointer(&in.Containers)) + return nil +} + +// Convert_v1beta1_PodMetrics_To_metrics_PodMetrics is an autogenerated conversion function. +func Convert_v1beta1_PodMetrics_To_metrics_PodMetrics(in *PodMetrics, out *metrics.PodMetrics, s conversion.Scope) error { + return autoConvert_v1beta1_PodMetrics_To_metrics_PodMetrics(in, out, s) +} + +func autoConvert_metrics_PodMetrics_To_v1beta1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + out.Timestamp = in.Timestamp + out.Window = in.Window + out.Containers = *(*[]ContainerMetrics)(unsafe.Pointer(&in.Containers)) + return nil +} + +// Convert_metrics_PodMetrics_To_v1beta1_PodMetrics is an autogenerated conversion function. +func Convert_metrics_PodMetrics_To_v1beta1_PodMetrics(in *metrics.PodMetrics, out *PodMetrics, s conversion.Scope) error { + return autoConvert_metrics_PodMetrics_To_v1beta1_PodMetrics(in, out, s) +} + +func autoConvert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]metrics.PodMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList is an autogenerated conversion function. +func Convert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(in *PodMetricsList, out *metrics.PodMetricsList, s conversion.Scope) error { + return autoConvert_v1beta1_PodMetricsList_To_metrics_PodMetricsList(in, out, s) +} + +func autoConvert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]PodMetrics)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_metrics_PodMetricsList_To_v1beta1_PodMetricsList is an autogenerated conversion function. +func Convert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(in *metrics.PodMetricsList, out *PodMetricsList, s conversion.Scope) error { + return autoConvert_metrics_PodMetricsList_To_v1beta1_PodMetricsList(in, out, s) +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go new file mode 100644 index 00000000000..f043d4642ff --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/v1beta1/zz_generated.deepcopy.go @@ -0,0 +1,186 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1beta1 + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { + *out = *in + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerMetrics. +func (in *ContainerMetrics) DeepCopy() *ContainerMetrics { + if in == nil { + return nil + } + out := new(ContainerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeMetrics) DeepCopyInto(out *NodeMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetrics. +func (in *NodeMetrics) DeepCopy() *NodeMetrics { + if in == nil { + return nil + } + out := new(NodeMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeMetricsList) DeepCopyInto(out *NodeMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetricsList. +func (in *NodeMetricsList) DeepCopy() *NodeMetricsList { + if in == nil { + return nil + } + out := new(NodeMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetrics) DeepCopyInto(out *PodMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ContainerMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetrics. 
+func (in *PodMetrics) DeepCopy() *PodMetrics { + if in == nil { + return nil + } + out := new(PodMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetricsList) DeepCopyInto(out *PodMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsList. +func (in *PodMetricsList) DeepCopy() *PodMetricsList { + if in == nil { + return nil + } + out := new(PodMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go b/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go new file mode 100644 index 00000000000..c063c9b28a1 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/apis/metrics/zz_generated.deepcopy.go @@ -0,0 +1,186 @@ +//go:build !ignore_autogenerated +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package metrics + +import ( + v1 "k8s.io/api/core/v1" + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *ContainerMetrics) DeepCopyInto(out *ContainerMetrics) { + *out = *in + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ContainerMetrics. +func (in *ContainerMetrics) DeepCopy() *ContainerMetrics { + if in == nil { + return nil + } + out := new(ContainerMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
+func (in *NodeMetrics) DeepCopyInto(out *NodeMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Usage != nil { + in, out := &in.Usage, &out.Usage + *out = make(v1.ResourceList, len(*in)) + for key, val := range *in { + (*out)[key] = val.DeepCopy() + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetrics. +func (in *NodeMetrics) DeepCopy() *NodeMetrics { + if in == nil { + return nil + } + out := new(NodeMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *NodeMetricsList) DeepCopyInto(out *NodeMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]NodeMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeMetricsList. +func (in *NodeMetricsList) DeepCopy() *NodeMetricsList { + if in == nil { + return nil + } + out := new(NodeMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *NodeMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetrics) DeepCopyInto(out *PodMetrics) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + in.Timestamp.DeepCopyInto(&out.Timestamp) + out.Window = in.Window + if in.Containers != nil { + in, out := &in.Containers, &out.Containers + *out = make([]ContainerMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetrics. +func (in *PodMetrics) DeepCopy() *PodMetrics { + if in == nil { + return nil + } + out := new(PodMetrics) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetrics) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *PodMetricsList) DeepCopyInto(out *PodMetricsList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]PodMetrics, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new PodMetricsList. 
+func (in *PodMetricsList) DeepCopy() *PodMetricsList { + if in == nil { + return nil + } + out := new(PodMetricsList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *PodMetricsList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go new file mode 100644 index 00000000000..f5fba6d0c99 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/clientset.go @@ -0,0 +1,133 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package versioned + +import ( + "fmt" + "net/http" + + discovery "k8s.io/client-go/discovery" + rest "k8s.io/client-go/rest" + flowcontrol "k8s.io/client-go/util/flowcontrol" + metricsv1alpha1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1" + metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1" +) + +type Interface interface { + Discovery() discovery.DiscoveryInterface + MetricsV1alpha1() metricsv1alpha1.MetricsV1alpha1Interface + MetricsV1beta1() metricsv1beta1.MetricsV1beta1Interface +} + +// Clientset contains the clients for groups. +type Clientset struct { + *discovery.DiscoveryClient + metricsV1alpha1 *metricsv1alpha1.MetricsV1alpha1Client + metricsV1beta1 *metricsv1beta1.MetricsV1beta1Client +} + +// MetricsV1alpha1 retrieves the MetricsV1alpha1Client +func (c *Clientset) MetricsV1alpha1() metricsv1alpha1.MetricsV1alpha1Interface { + return c.metricsV1alpha1 +} + +// MetricsV1beta1 retrieves the MetricsV1beta1Client +func (c *Clientset) MetricsV1beta1() metricsv1beta1.MetricsV1beta1Interface { + return c.metricsV1beta1 +} + +// Discovery retrieves the DiscoveryClient +func (c *Clientset) Discovery() discovery.DiscoveryInterface { + if c == nil { + return nil + } + return c.DiscoveryClient +} + +// NewForConfig creates a new Clientset for the given config. +// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfig will generate a rate-limiter in configShallowCopy. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*Clientset, error) { + configShallowCopy := *c + + if configShallowCopy.UserAgent == "" { + configShallowCopy.UserAgent = rest.DefaultKubernetesUserAgent() + } + + // share the transport between all clients + httpClient, err := rest.HTTPClientFor(&configShallowCopy) + if err != nil { + return nil, err + } + + return NewForConfigAndClient(&configShallowCopy, httpClient) +} + +// NewForConfigAndClient creates a new Clientset for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. 
+// If config's RateLimiter is not set and QPS and Burst are acceptable, +// NewForConfigAndClient will generate a rate-limiter in configShallowCopy. +func NewForConfigAndClient(c *rest.Config, httpClient *http.Client) (*Clientset, error) { + configShallowCopy := *c + if configShallowCopy.RateLimiter == nil && configShallowCopy.QPS > 0 { + if configShallowCopy.Burst <= 0 { + return nil, fmt.Errorf("burst is required to be greater than 0 when RateLimiter is not set and QPS is set to greater than 0") + } + configShallowCopy.RateLimiter = flowcontrol.NewTokenBucketRateLimiter(configShallowCopy.QPS, configShallowCopy.Burst) + } + + var cs Clientset + var err error + cs.metricsV1alpha1, err = metricsv1alpha1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + cs.metricsV1beta1, err = metricsv1beta1.NewForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + + cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfigAndClient(&configShallowCopy, httpClient) + if err != nil { + return nil, err + } + return &cs, nil +} + +// NewForConfigOrDie creates a new Clientset for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *Clientset { + cs, err := NewForConfig(c) + if err != nil { + panic(err) + } + return cs +} + +// New creates a new Clientset for the given RESTClient. +func New(c rest.Interface) *Clientset { + var cs Clientset + cs.metricsV1alpha1 = metricsv1alpha1.New(c) + cs.metricsV1beta1 = metricsv1beta1.New(c) + + cs.DiscoveryClient = discovery.NewDiscoveryClient(c) + return &cs +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/doc.go new file mode 100644 index 00000000000..7dc3756168f --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package contains the scheme of the automatically generated clientset. +package scheme diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go new file mode 100644 index 00000000000..a92b020a9ff --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/scheme/register.go @@ -0,0 +1,58 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package scheme + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + schema "k8s.io/apimachinery/pkg/runtime/schema" + serializer "k8s.io/apimachinery/pkg/runtime/serializer" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + metricsv1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" + metricsv1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" +) + +var Scheme = runtime.NewScheme() +var Codecs = serializer.NewCodecFactory(Scheme) +var ParameterCodec = runtime.NewParameterCodec(Scheme) +var localSchemeBuilder = runtime.SchemeBuilder{ + metricsv1alpha1.AddToScheme, + metricsv1beta1.AddToScheme, +} + +// AddToScheme adds all types of this clientset into the given scheme. This allows composition +// of clientsets, like in: +// +// import ( +// "k8s.io/client-go/kubernetes" +// clientsetscheme "k8s.io/client-go/kubernetes/scheme" +// aggregatorclientsetscheme "k8s.io/kube-aggregator/pkg/client/clientset_generated/clientset/scheme" +// ) +// +// kclientset, _ := kubernetes.NewForConfig(c) +// _ = aggregatorclientsetscheme.AddToScheme(clientsetscheme.Scheme) +// +// After this, RawExtensions in Kubernetes types will serialize kube-aggregator types +// correctly. +var AddToScheme = localSchemeBuilder.AddToScheme + +func init() { + v1.AddToGroupVersion(Scheme, schema.GroupVersion{Version: "v1"}) + utilruntime.Must(AddToScheme(Scheme)) +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/doc.go new file mode 100644 index 00000000000..df51baa4d4c --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1alpha1 diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/generated_expansion.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/generated_expansion.go new file mode 100644 index 00000000000..e8fc33bbb14 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +type NodeMetricsExpansion interface{} + +type PodMetricsExpansion interface{} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/metrics_client.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/metrics_client.go new file mode 100644 index 00000000000..efc23042d47 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/metrics_client.go @@ -0,0 +1,112 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "net/http" + + rest "k8s.io/client-go/rest" + v1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" + "k8s.io/metrics/pkg/client/clientset/versioned/scheme" +) + +type MetricsV1alpha1Interface interface { + RESTClient() rest.Interface + NodeMetricsesGetter + PodMetricsesGetter +} + +// MetricsV1alpha1Client is used to interact with features provided by the metrics.k8s.io group. +type MetricsV1alpha1Client struct { + restClient rest.Interface +} + +func (c *MetricsV1alpha1Client) NodeMetricses() NodeMetricsInterface { + return newNodeMetricses(c) +} + +func (c *MetricsV1alpha1Client) PodMetricses(namespace string) PodMetricsInterface { + return newPodMetricses(c, namespace) +} + +// NewForConfig creates a new MetricsV1alpha1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*MetricsV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new MetricsV1alpha1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*MetricsV1alpha1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &MetricsV1alpha1Client{client}, nil +} + +// NewForConfigOrDie creates a new MetricsV1alpha1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *MetricsV1alpha1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new MetricsV1alpha1Client for the given RESTClient. 
+func New(c rest.Interface) *MetricsV1alpha1Client { + return &MetricsV1alpha1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1alpha1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *MetricsV1alpha1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go new file mode 100644 index 00000000000..d79163ddb81 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/nodemetrics.go @@ -0,0 +1,98 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" + scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" +) + +// NodeMetricsesGetter has a method to return a NodeMetricsInterface. +// A group's client should implement this interface. +type NodeMetricsesGetter interface { + NodeMetricses() NodeMetricsInterface +} + +// NodeMetricsInterface has methods to work with NodeMetrics resources. +type NodeMetricsInterface interface { + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.NodeMetrics, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.NodeMetricsList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + NodeMetricsExpansion +} + +// nodeMetricses implements NodeMetricsInterface +type nodeMetricses struct { + client rest.Interface +} + +// newNodeMetricses returns a NodeMetricses +func newNodeMetricses(c *MetricsV1alpha1Client) *nodeMetricses { + return &nodeMetricses{ + client: c.RESTClient(), + } +} + +// Get takes name of the nodeMetrics, and returns the corresponding nodeMetrics object, and an error if there is any. +func (c *nodeMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.NodeMetrics, err error) { + result = &v1alpha1.NodeMetrics{} + err = c.client.Get(). + Resource("nodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of NodeMetricses that match those selectors. 
+func (c *nodeMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.NodeMetricsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.NodeMetricsList{} + err = c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested nodeMetricses. +func (c *nodeMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go new file mode 100644 index 00000000000..49d57c8e887 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1alpha1/podmetrics.go @@ -0,0 +1,103 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1alpha1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1alpha1 "k8s.io/metrics/pkg/apis/metrics/v1alpha1" + scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" +) + +// PodMetricsesGetter has a method to return a PodMetricsInterface. +// A group's client should implement this interface. +type PodMetricsesGetter interface { + PodMetricses(namespace string) PodMetricsInterface +} + +// PodMetricsInterface has methods to work with PodMetrics resources. +type PodMetricsInterface interface { + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1alpha1.PodMetrics, error) + List(ctx context.Context, opts v1.ListOptions) (*v1alpha1.PodMetricsList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + PodMetricsExpansion +} + +// podMetricses implements PodMetricsInterface +type podMetricses struct { + client rest.Interface + ns string +} + +// newPodMetricses returns a PodMetricses +func newPodMetricses(c *MetricsV1alpha1Client, namespace string) *podMetricses { + return &podMetricses{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podMetrics, and returns the corresponding podMetrics object, and an error if there is any. +func (c *podMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1alpha1.PodMetrics, err error) { + result = &v1alpha1.PodMetrics{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodMetricses that match those selectors. +func (c *podMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1alpha1.PodMetricsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1alpha1.PodMetricsList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podMetricses. +func (c *podMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/doc.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/doc.go new file mode 100644 index 00000000000..771101956f3 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1beta1 diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/generated_expansion.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/generated_expansion.go new file mode 100644 index 00000000000..a89ca3c780d --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/generated_expansion.go @@ -0,0 +1,23 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1beta1 + +type NodeMetricsExpansion interface{} + +type PodMetricsExpansion interface{} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/metrics_client.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/metrics_client.go new file mode 100644 index 00000000000..7a02cea2e5e --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/metrics_client.go @@ -0,0 +1,112 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "net/http" + + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" + "k8s.io/metrics/pkg/client/clientset/versioned/scheme" +) + +type MetricsV1beta1Interface interface { + RESTClient() rest.Interface + NodeMetricsesGetter + PodMetricsesGetter +} + +// MetricsV1beta1Client is used to interact with features provided by the metrics.k8s.io group. +type MetricsV1beta1Client struct { + restClient rest.Interface +} + +func (c *MetricsV1beta1Client) NodeMetricses() NodeMetricsInterface { + return newNodeMetricses(c) +} + +func (c *MetricsV1beta1Client) PodMetricses(namespace string) PodMetricsInterface { + return newPodMetricses(c, namespace) +} + +// NewForConfig creates a new MetricsV1beta1Client for the given config. +// NewForConfig is equivalent to NewForConfigAndClient(c, httpClient), +// where httpClient was generated with rest.HTTPClientFor(c). +func NewForConfig(c *rest.Config) (*MetricsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + httpClient, err := rest.HTTPClientFor(&config) + if err != nil { + return nil, err + } + return NewForConfigAndClient(&config, httpClient) +} + +// NewForConfigAndClient creates a new MetricsV1beta1Client for the given config and http client. +// Note the http client provided takes precedence over the configured transport values. +func NewForConfigAndClient(c *rest.Config, h *http.Client) (*MetricsV1beta1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientForConfigAndClient(&config, h) + if err != nil { + return nil, err + } + return &MetricsV1beta1Client{client}, nil +} + +// NewForConfigOrDie creates a new MetricsV1beta1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *MetricsV1beta1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new MetricsV1beta1Client for the given RESTClient. 
+func New(c rest.Interface) *MetricsV1beta1Client { + return &MetricsV1beta1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1beta1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *MetricsV1beta1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go new file mode 100644 index 00000000000..a312221ed25 --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/nodemetrics.go @@ -0,0 +1,98 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" + scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" +) + +// NodeMetricsesGetter has a method to return a NodeMetricsInterface. +// A group's client should implement this interface. +type NodeMetricsesGetter interface { + NodeMetricses() NodeMetricsInterface +} + +// NodeMetricsInterface has methods to work with NodeMetrics resources. +type NodeMetricsInterface interface { + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.NodeMetrics, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.NodeMetricsList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + NodeMetricsExpansion +} + +// nodeMetricses implements NodeMetricsInterface +type nodeMetricses struct { + client rest.Interface +} + +// newNodeMetricses returns a NodeMetricses +func newNodeMetricses(c *MetricsV1beta1Client) *nodeMetricses { + return &nodeMetricses{ + client: c.RESTClient(), + } +} + +// Get takes name of the nodeMetrics, and returns the corresponding nodeMetrics object, and an error if there is any. +func (c *nodeMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.NodeMetrics, err error) { + result = &v1beta1.NodeMetrics{} + err = c.client.Get(). + Resource("nodes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of NodeMetricses that match those selectors. 
+func (c *nodeMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.NodeMetricsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.NodeMetricsList{} + err = c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested nodeMetricses. +func (c *nodeMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Resource("nodes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +} diff --git a/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go new file mode 100644 index 00000000000..e66c377c25b --- /dev/null +++ b/vendor/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1/podmetrics.go @@ -0,0 +1,103 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1beta1 + +import ( + "context" + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1beta1 "k8s.io/metrics/pkg/apis/metrics/v1beta1" + scheme "k8s.io/metrics/pkg/client/clientset/versioned/scheme" +) + +// PodMetricsesGetter has a method to return a PodMetricsInterface. +// A group's client should implement this interface. +type PodMetricsesGetter interface { + PodMetricses(namespace string) PodMetricsInterface +} + +// PodMetricsInterface has methods to work with PodMetrics resources. +type PodMetricsInterface interface { + Get(ctx context.Context, name string, opts v1.GetOptions) (*v1beta1.PodMetrics, error) + List(ctx context.Context, opts v1.ListOptions) (*v1beta1.PodMetricsList, error) + Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) + PodMetricsExpansion +} + +// podMetricses implements PodMetricsInterface +type podMetricses struct { + client rest.Interface + ns string +} + +// newPodMetricses returns a PodMetricses +func newPodMetricses(c *MetricsV1beta1Client, namespace string) *podMetricses { + return &podMetricses{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the podMetrics, and returns the corresponding podMetrics object, and an error if there is any. +func (c *podMetricses) Get(ctx context.Context, name string, options v1.GetOptions) (result *v1beta1.PodMetrics, err error) { + result = &v1beta1.PodMetrics{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(ctx). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of PodMetricses that match those selectors. +func (c *podMetricses) List(ctx context.Context, opts v1.ListOptions) (result *v1beta1.PodMetricsList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1beta1.PodMetricsList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(ctx). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested podMetricses. +func (c *podMetricses) Watch(ctx context.Context, opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("pods"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch(ctx) +}
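
The vendored typed client above exposes `PodMetricses(namespace)` with `Get`/`List`/`Watch`, which is what the new pod-metrics wiring in the operator builds on. Below is a minimal, illustrative sketch (not KEDA's actual wiring) of how this client can be constructed from a REST config and used to read per-pod CPU usage; the `"default"` namespace and the `app=my-app` label selector are assumptions for the example only.

package main

import (
	"context"
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	metricsv1beta1 "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
	ctrl "sigs.k8s.io/controller-runtime"
)

func main() {
	// Build the typed metrics.k8s.io/v1beta1 client from the usual controller-runtime REST config.
	cfg := ctrl.GetConfigOrDie()
	client, err := metricsv1beta1.NewForConfig(cfg)
	if err != nil {
		panic(err)
	}

	// List metrics for a set of pods (namespace and selector are hypothetical).
	podMetrics, err := client.PodMetricses("default").List(context.TODO(), metav1.ListOptions{
		LabelSelector: "app=my-app",
	})
	if err != nil {
		panic(err)
	}

	// Sum per-container CPU usage for each pod, reported in millicores.
	for _, pm := range podMetrics.Items {
		var cpuMilli int64
		for _, c := range pm.Containers {
			cpuMilli += c.Usage.Cpu().MilliValue()
		}
		fmt.Printf("%s/%s: %dm CPU\n", pm.Namespace, pm.Name, cpuMilli)
	}
}

This is the kind of raw usage data the CPU/Memory activation feature needs alongside the HPA's reported utilization; in the operator itself the client is created once via the pod-metrics helper and passed into the scale handler rather than built ad hoc as shown here.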