diff --git a/go.mod b/go.mod
index c397ea5fb..71447a36f 100644
--- a/go.mod
+++ b/go.mod
@@ -18,7 +18,7 @@ require (
 	knative.dev/hack v0.0.0-20250109131303-f8be0ccdff36
 	knative.dev/networking v0.0.0-20250113195501-679796878afc
 	knative.dev/pkg v0.0.0-20250113161000-0c2a238a16ed
-	knative.dev/serving v0.43.1-0.20250113163001-3e6d275e2bc4
+	knative.dev/serving v0.43.1-0.20250115103709-b484fa275687
 )
 
 require (
diff --git a/go.sum b/go.sum
index 4848a51c6..905572d51 100644
--- a/go.sum
+++ b/go.sum
@@ -828,8 +828,8 @@ knative.dev/networking v0.0.0-20250113195501-679796878afc h1:bzQlBSOXd7Uz+/ugX8b
 knative.dev/networking v0.0.0-20250113195501-679796878afc/go.mod h1:Coz36qc6dZK219RRTPuzQf4YTrCjoqN5T2hcr14RNfI=
 knative.dev/pkg v0.0.0-20250113161000-0c2a238a16ed h1:PUMNZc1CBUg5G4JYio3wjpCJuCJG2ZeCuLvlBTpDiZI=
 knative.dev/pkg v0.0.0-20250113161000-0c2a238a16ed/go.mod h1:p7c7yCCf1YVX04FQ8YDyJaHciknu726qVWOXRX5tIBM=
-knative.dev/serving v0.43.1-0.20250113163001-3e6d275e2bc4 h1:khvS0c6FTsz7Rrto0YzNMiqHYvzNWW5STDhu/G/aMlE=
-knative.dev/serving v0.43.1-0.20250113163001-3e6d275e2bc4/go.mod h1:r6fkQURyTwcCiu/4dXFJbCW/Jp+lPUO9ULCARdTYjT8=
+knative.dev/serving v0.43.1-0.20250115103709-b484fa275687 h1:bACr2Bzt12kMTKJA/LkC0Dj7xhcX45yoRsdJeCJEjj8=
+knative.dev/serving v0.43.1-0.20250115103709-b484fa275687/go.mod h1:idwHPJ+HPW8/RPna8J2lQfkNQsYaWa7Y5K4bm6pJkDE=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
 rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
diff --git a/vendor/knative.dev/serving/pkg/activator/activator.go b/vendor/knative.dev/serving/pkg/activator/activator.go
index eb8d73d18..f34f3794b 100644
--- a/vendor/knative.dev/serving/pkg/activator/activator.go
+++ b/vendor/knative.dev/serving/pkg/activator/activator.go
@@ -25,11 +25,9 @@ const (
 	RevisionHeaderNamespace = "Knative-Serving-Namespace"
 )
 
-var (
-	// RevisionHeaders are the headers the activator uses to identify the
-	// revision. They are removed before reaching the user container.
-	RevisionHeaders = []string{
-		RevisionHeaderName,
-		RevisionHeaderNamespace,
-	}
-)
+// RevisionHeaders are the headers the activator uses to identify the
+// revision. They are removed before reaching the user container.
+var RevisionHeaders = []string{ + RevisionHeaderName, + RevisionHeaderNamespace, +} diff --git a/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation.go b/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation.go index 95076a603..54d5f3c11 100644 --- a/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation.go +++ b/vendor/knative.dev/serving/pkg/apis/autoscaling/annotation_validation.go @@ -268,9 +268,9 @@ func validateInitialScale(config *autoscalerconfig.Config, m map[string]string) if err != nil { return apis.ErrInvalidValue(v, k) } else if initScaleInt < 0 { - return apis.ErrInvalidValue(v, fmt.Sprintf("%s must be greater than 0", k)) + return apis.ErrInvalidValue(v, k+" must be greater than 0") } else if !config.AllowZeroInitialScale && initScaleInt == 0 { - return apis.ErrInvalidValue(v, fmt.Sprintf("%s=0 not allowed by cluster", k)) + return apis.ErrInvalidValue(v, k+"=0 not allowed by cluster") } } return nil diff --git a/vendor/knative.dev/serving/pkg/apis/autoscaling/register.go b/vendor/knative.dev/serving/pkg/apis/autoscaling/register.go index 3d488f105..8ed7042c3 100644 --- a/vendor/knative.dev/serving/pkg/apis/autoscaling/register.go +++ b/vendor/knative.dev/serving/pkg/apis/autoscaling/register.go @@ -138,8 +138,6 @@ const ( // points will be missed entirely by the panic window which is // smaller than the stable window. Anything less than 6 seconds // isn't going to work well. - // - // nolint:revive // False positive, Min means minimum, not minutes. WindowMin = 6 * time.Second // WindowMax is the maximum permitted stable autoscaling window. // This keeps the event horizon to a reasonable enough limit. diff --git a/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/podscalable_types.go b/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/podscalable_types.go index b6d05a7b7..b667c7b59 100644 --- a/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/podscalable_types.go +++ b/vendor/knative.dev/serving/pkg/apis/autoscaling/v1alpha1/podscalable_types.go @@ -54,9 +54,11 @@ type PodScalableStatus struct { Replicas int32 `json:"replicas,omitempty"` } -var _ duck.Populatable = (*PodScalable)(nil) -var _ duck.Implementable = (*PodScalable)(nil) -var _ apis.Listable = (*PodScalable)(nil) +var ( + _ duck.Populatable = (*PodScalable)(nil) + _ duck.Implementable = (*PodScalable)(nil) + _ apis.Listable = (*PodScalable)(nil) +) // GetFullType implements duck.Implementable func (*PodScalable) GetFullType() duck.Populatable { diff --git a/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go b/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go index 142d42d36..b1761e238 100644 --- a/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go +++ b/vendor/knative.dev/serving/pkg/apis/serving/fieldmask.go @@ -389,7 +389,6 @@ func HandlerMask(in *corev1.ProbeHandler) *corev1.ProbeHandler { out.GRPC = in.GRPC return out - } // ExecActionMask performs a _shallow_ copy of the Kubernetes ExecAction object to a new @@ -473,7 +472,7 @@ func ContainerPortMask(in *corev1.ContainerPort) *corev1.ContainerPort { out.Name = in.Name out.Protocol = in.Protocol - //Disallowed fields + // Disallowed fields // This list is unnecessary, but added here for clarity out.HostIP = "" out.HostPort = 0 @@ -552,7 +551,6 @@ func ConfigMapKeySelectorMask(in *corev1.ConfigMapKeySelector) *corev1.ConfigMap out.LocalObjectReference = in.LocalObjectReference return out - } // SecretKeySelectorMask performs a _shallow_ copy of the Kubernetes 
SecretKeySelector object to a new @@ -571,7 +569,6 @@ func SecretKeySelectorMask(in *corev1.SecretKeySelector) *corev1.SecretKeySelect out.LocalObjectReference = in.LocalObjectReference return out - } // ConfigMapEnvSourceMask performs a _shallow_ copy of the Kubernetes ConfigMapEnvSource object to a new @@ -589,7 +586,6 @@ func ConfigMapEnvSourceMask(in *corev1.ConfigMapEnvSource) *corev1.ConfigMapEnvS out.LocalObjectReference = in.LocalObjectReference return out - } // SecretEnvSourceMask performs a _shallow_ copy of the Kubernetes SecretEnvSource object to a new @@ -607,7 +603,6 @@ func SecretEnvSourceMask(in *corev1.SecretEnvSource) *corev1.SecretEnvSource { out.LocalObjectReference = in.LocalObjectReference return out - } // EnvFromSourceMask performs a _shallow_ copy of the Kubernetes EnvFromSource object to a new @@ -643,7 +638,6 @@ func ResourceRequirementsMask(in *corev1.ResourceRequirements) *corev1.ResourceR out.Requests = in.Requests return out - } // PodSecurityContextMask performs a _shallow_ copy of the Kubernetes PodSecurityContext object into a new diff --git a/vendor/knative.dev/serving/pkg/apis/serving/k8s_lifecycle.go b/vendor/knative.dev/serving/pkg/apis/serving/k8s_lifecycle.go index f63060c8b..aaf977564 100644 --- a/vendor/knative.dev/serving/pkg/apis/serving/k8s_lifecycle.go +++ b/vendor/knative.dev/serving/pkg/apis/serving/k8s_lifecycle.go @@ -83,7 +83,6 @@ func TransformDeploymentStatus(ds *appsv1.DeploymentStatus) *duckv1.Status { depCondSet.Manage(s).MarkTrue(DeploymentConditionReplicaSetReady) } } - } } diff --git a/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_conversion.go b/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_conversion.go index 6f01637ff..985317ec0 100644 --- a/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_conversion.go +++ b/vendor/knative.dev/serving/pkg/apis/serving/v1/configuration_conversion.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +//nolint:stylecheck // ignore complaints about receiver having different names package v1 import ( diff --git a/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_conversion.go b/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_conversion.go index 053b79156..278ef1e51 100644 --- a/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_conversion.go +++ b/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_conversion.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. 
*/ +//nolint:stylecheck // ignore complaints about receiver having different names package v1 import ( diff --git a/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_validation.go b/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_validation.go index 2508bf8bb..def2570fe 100644 --- a/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_validation.go +++ b/vendor/knative.dev/serving/pkg/apis/serving/v1/revision_validation.go @@ -193,10 +193,8 @@ func validateQueueSidecarResourceAnnotations(m map[string]string) *apis.FieldErr value, err := strconv.ParseFloat(v, 64) if err != nil { errs = errs.Also(apis.ErrInvalidValue(v, apis.CurrentField).ViaKey(k)) - } else { - if value < 0.1 || value > 100 { - errs = errs.Also(apis.ErrOutOfBoundsValue(value, 0.1, 100.0, apis.CurrentField).ViaKey(k)) - } + } else if value < 0.1 || value > 100 { + errs = errs.Also(apis.ErrOutOfBoundsValue(value, 0.1, 100.0, apis.CurrentField).ViaKey(k)) } } annoKeys := []kmap.KeyPriority{ diff --git a/vendor/knative.dev/serving/pkg/apis/serving/v1/route_conversion.go b/vendor/knative.dev/serving/pkg/apis/serving/v1/route_conversion.go index 50682d0a8..1c3275553 100644 --- a/vendor/knative.dev/serving/pkg/apis/serving/v1/route_conversion.go +++ b/vendor/knative.dev/serving/pkg/apis/serving/v1/route_conversion.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +//nolint:stylecheck // ignore complaints about receiver having different names package v1 import ( diff --git a/vendor/knative.dev/serving/pkg/apis/serving/v1/service_conversion.go b/vendor/knative.dev/serving/pkg/apis/serving/v1/service_conversion.go index 6f427dc11..3b4853f1a 100644 --- a/vendor/knative.dev/serving/pkg/apis/serving/v1/service_conversion.go +++ b/vendor/knative.dev/serving/pkg/apis/serving/v1/service_conversion.go @@ -14,6 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ +//nolint:stylecheck // ignore complaints about receiver having different names package v1 import ( diff --git a/vendor/knative.dev/serving/pkg/apis/serving/v1/service_defaults.go b/vendor/knative.dev/serving/pkg/apis/serving/v1/service_defaults.go index be876c3a4..df1eba33b 100644 --- a/vendor/knative.dev/serving/pkg/apis/serving/v1/service_defaults.go +++ b/vendor/knative.dev/serving/pkg/apis/serving/v1/service_defaults.go @@ -35,7 +35,6 @@ func (s *Service) SetDefaults(ctx context.Context) { s.Spec.SetDefaults(apis.WithinSpec(ctx)) serving.SetUserInfo(ctx, prevSpec, &s.Spec, s) - } // SetDefaults implements apis.Defaultable diff --git a/vendor/knative.dev/serving/pkg/autoscaler/aggregation/bucketing.go b/vendor/knative.dev/serving/pkg/autoscaler/aggregation/bucketing.go index 3714b5a2d..4e4461689 100644 --- a/vendor/knative.dev/serving/pkg/autoscaler/aggregation/bucketing.go +++ b/vendor/knative.dev/serving/pkg/autoscaler/aggregation/bucketing.go @@ -166,13 +166,13 @@ func (t *WeightedFloat64Buckets) WindowAverage(now time.Time) float64 { if now.After(t.lastWrite) { numZ := now.Sub(t.lastWrite) / t.granularity // Skip to this multiplier directly: m*(1-m)^(nz-1). - multiplier = multiplier * math.Pow(1-t.smoothingCoeff, float64(numZ)) + multiplier *= math.Pow(1-t.smoothingCoeff, float64(numZ)) // Reduce effective number of buckets. numB -= int(numZ) } startIdx := t.timeToIndex(t.lastWrite) + totalB // To ensure always positive % operation. ret := 0. 
- for i := 0; i < numB; i++ { + for i := range numB { effectiveIdx := (startIdx - i) % totalB v := t.buckets[effectiveIdx] * multiplier ret += v @@ -336,7 +336,7 @@ func (t *TimedFloat64Buckets) ResizeWindow(w time.Duration) { // `newBuckets` buckets. oldNumBuckets := len(t.buckets) tIdx := t.timeToIndex(t.lastWrite) - for i := 0; i < min(numBuckets, oldNumBuckets); i++ { + for range min(numBuckets, oldNumBuckets) { oi := tIdx % oldNumBuckets ni := tIdx % numBuckets newBuckets[ni] = t.buckets[oi] diff --git a/vendor/knative.dev/serving/pkg/autoscaler/bucket/bucket.go b/vendor/knative.dev/serving/pkg/autoscaler/bucket/bucket.go index a6eb2fad4..37fb5f0b5 100644 --- a/vendor/knative.dev/serving/pkg/autoscaler/bucket/bucket.go +++ b/vendor/knative.dev/serving/pkg/autoscaler/bucket/bucket.go @@ -46,7 +46,7 @@ func AutoscalerBucketName(ordinal, total uint32) string { // buckets with the given `total` count. func AutoscalerBucketSet(total uint32) *hash.BucketSet { names := make(sets.Set[string], total) - for i := uint32(0); i < total; i++ { + for i := range total { names.Insert(AutoscalerBucketName(i, total)) } return hash.NewBucketSet(names) diff --git a/vendor/knative.dev/serving/pkg/autoscaler/config/config.go b/vendor/knative.dev/serving/pkg/autoscaler/config/config.go index 8ae207d33..25bc0076b 100644 --- a/vendor/knative.dev/serving/pkg/autoscaler/config/config.go +++ b/vendor/knative.dev/serving/pkg/autoscaler/config/config.go @@ -172,7 +172,6 @@ func validate(lc *autoscalerconfig.Config) (*autoscalerconfig.Config, error) { lc.PanicWindowPercentage > autoscaling.PanicWindowPercentageMax { return nil, fmt.Errorf("panic-window-percentage = %v, must be in [%v, %v] interval", lc.PanicWindowPercentage, autoscaling.PanicWindowPercentageMin, autoscaling.PanicWindowPercentageMax) - } if lc.InitialScale < 0 || (lc.InitialScale == 0 && !lc.AllowZeroInitialScale) { diff --git a/vendor/knative.dev/serving/pkg/autoscaler/metrics/collector.go b/vendor/knative.dev/serving/pkg/autoscaler/metrics/collector.go index 9719fac38..a17b6e1d4 100644 --- a/vendor/knative.dev/serving/pkg/autoscaler/metrics/collector.go +++ b/vendor/knative.dev/serving/pkg/autoscaler/metrics/collector.go @@ -96,8 +96,10 @@ type MetricCollector struct { watcher func(types.NamespacedName) } -var _ Collector = (*MetricCollector)(nil) -var _ MetricClient = (*MetricCollector)(nil) +var ( + _ Collector = (*MetricCollector)(nil) + _ MetricClient = (*MetricCollector)(nil) +) // NewMetricCollector creates a new metric collector. func NewMetricCollector(statsScraperFactory StatsScraperFactory, logger *zap.SugaredLogger) *MetricCollector { @@ -263,7 +265,8 @@ func (c *collection) getScraper() StatsScraper { // newCollection creates a new collection, which uses the given scraper to // collect stats every scrapeTickInterval. func newCollection(metric *autoscalingv1alpha1.Metric, scraper StatsScraper, clock clock.WithTicker, - callback func(types.NamespacedName), logger *zap.SugaredLogger) *collection { + callback func(types.NamespacedName), logger *zap.SugaredLogger, +) *collection { // Pick the constructor to use to build the buckets. // NB: this relies on the fact that aggregation algorithm is set on annotation of revision // and as such is immutable. 
diff --git a/vendor/knative.dev/serving/pkg/autoscaler/metrics/stats_scraper.go b/vendor/knative.dev/serving/pkg/autoscaler/metrics/stats_scraper.go index e3ba25ab6..ab39e09af 100644 --- a/vendor/knative.dev/serving/pkg/autoscaler/metrics/stats_scraper.go +++ b/vendor/knative.dev/serving/pkg/autoscaler/metrics/stats_scraper.go @@ -24,11 +24,11 @@ import ( "net/http" "strconv" "sync" + "sync/atomic" "time" "go.opencensus.io/stats" "go.opencensus.io/stats/view" - "go.uber.org/atomic" "go.uber.org/zap" "golang.org/x/sync/errgroup" @@ -160,7 +160,8 @@ type serviceScraper struct { // NewStatsScraper creates a new StatsScraper for the Revision which // the given Metric is responsible for. func NewStatsScraper(metric *autoscalingv1alpha1.Metric, revisionName string, podAccessor resources.PodAccessor, - usePassthroughLb bool, meshMode netcfg.MeshCompatibilityMode, logger *zap.SugaredLogger) StatsScraper { + usePassthroughLb bool, meshMode netcfg.MeshCompatibilityMode, logger *zap.SugaredLogger, +) StatsScraper { directClient := newHTTPScrapeClient(client) meshClient := newHTTPScrapeClient(noKeepaliveClient) return newServiceScraperWithClient(metric, revisionName, podAccessor, usePassthroughLb, meshMode, directClient, meshClient, logger) @@ -173,7 +174,8 @@ func newServiceScraperWithClient( usePassthroughLb bool, meshMode netcfg.MeshCompatibilityMode, directClient, meshClient scrapeClient, - logger *zap.SugaredLogger) *serviceScraper { + logger *zap.SugaredLogger, +) *serviceScraper { svcName := metric.Labels[serving.ServiceLabelKey] cfgName := metric.Labels[serving.ConfigurationLabelKey] @@ -276,16 +278,17 @@ func (s *serviceScraper) scrapePods(window time.Duration) (Stat, error) { pods = append(pods, youngPods...) grp, egCtx := errgroup.WithContext(context.Background()) - idx := atomic.NewInt32(-1) + var idx atomic.Int32 + idx.Store(-1) var sawNonMeshError atomic.Bool // Start |sampleSize| threads to scan in parallel. - for i := 0; i < sampleSize; i++ { + for range sampleSize { grp.Go(func() error { // If a given pod failed to scrape, we want to continue // scanning pods down the line. for { // Acquire next pod. - myIdx := int(idx.Inc()) + myIdx := int(idx.Add(1)) // All out? 
if myIdx >= len(pods) { return errPodsExhausted @@ -378,7 +381,7 @@ func (s *serviceScraper) scrapeService(window time.Duration) (Stat, error) { grp, egCtx := errgroup.WithContext(context.Background()) youngPodCutOffSecs := window.Seconds() - for i := 0; i < sampleSize; i++ { + for range sampleSize { grp.Go(func() error { for tries := 1; ; tries++ { stat, err := s.tryScrape(egCtx, scrapedPods) diff --git a/vendor/knative.dev/serving/pkg/autoscaler/scaling/autoscaler.go b/vendor/knative.dev/serving/pkg/autoscaler/scaling/autoscaler.go index 01f815f5d..0986c415e 100644 --- a/vendor/knative.dev/serving/pkg/autoscaler/scaling/autoscaler.go +++ b/vendor/knative.dev/serving/pkg/autoscaler/scaling/autoscaler.go @@ -68,8 +68,8 @@ func New( namespace, revision string, metricClient metrics.MetricClient, podCounter resources.EndpointsCounter, - deciderSpec *DeciderSpec) UniScaler { - + deciderSpec *DeciderSpec, +) UniScaler { var delayer *max.TimeWindow if deciderSpec.ScaleDownDelay > 0 { delayer = max.NewTimeWindow(deciderSpec.ScaleDownDelay, tickInterval) @@ -85,8 +85,8 @@ func newAutoscaler( metricClient metrics.MetricClient, podCounter podCounter, deciderSpec *DeciderSpec, - delayWindow *max.TimeWindow) *autoscaler { - + delayWindow *max.TimeWindow, +) *autoscaler { // We always start in the panic mode, if the deployment is scaled up over 1 pod. // If the scale is 0 or 1, normal Autoscaler behavior is fine. // When Autoscaler restarts we lose metric history, which causes us to @@ -120,7 +120,7 @@ func newAutoscaler( delayWindow: delayWindow, panicTime: pt, - maxPanicPods: int32(curC), + maxPanicPods: int32(curC), //nolint:gosec // k8s replica count is bounded by int32 } } diff --git a/vendor/knative.dev/serving/pkg/autoscaler/scaling/multiscaler.go b/vendor/knative.dev/serving/pkg/autoscaler/scaling/multiscaler.go index 973657d59..d1f59b511 100644 --- a/vendor/knative.dev/serving/pkg/autoscaler/scaling/multiscaler.go +++ b/vendor/knative.dev/serving/pkg/autoscaler/scaling/multiscaler.go @@ -193,7 +193,8 @@ type MultiScaler struct { func NewMultiScaler( stopCh <-chan struct{}, uniScalerFactory UniScalerFactory, - logger *zap.SugaredLogger) *MultiScaler { + logger *zap.SugaredLogger, +) *MultiScaler { return &MultiScaler{ scalers: make(map[types.NamespacedName]*scalerRunner), scalersStopCh: stopCh, diff --git a/vendor/knative.dev/serving/pkg/autoscaler/statforwarder/leases.go b/vendor/knative.dev/serving/pkg/autoscaler/statforwarder/leases.go index 7c27a0ca9..1806ffeae 100644 --- a/vendor/knative.dev/serving/pkg/autoscaler/statforwarder/leases.go +++ b/vendor/knative.dev/serving/pkg/autoscaler/statforwarder/leases.go @@ -19,6 +19,8 @@ package statforwarder import ( "context" "fmt" + "net" + "strconv" "go.uber.org/zap" coordinationv1 "k8s.io/api/coordination/v1" @@ -173,7 +175,7 @@ func (f *leaseTracker) leaseUpdated(obj interface{}) { if ip != f.selfIP { f.fwd.setProcessor(n, newForwardProcessor(f.logger.With(zap.String("bucket", n)), n, holder, - fmt.Sprintf("ws://%s:%d", ip, autoscalerPort), + "ws://"+net.JoinHostPort(ip, strconv.Itoa(autoscalerPort)), fmt.Sprintf("ws://%s.%s.%s", n, ns, svcURLSuffix))) // Skip creating/updating Service and Endpoints if not the leader. 
@@ -242,8 +244,8 @@ func (f *leaseTracker) createOrUpdateEndpoints(ctx context.Context, ns, n string Name: autoscalerPortName, Port: autoscalerPort, Protocol: corev1.ProtocolTCP, - }}}, - } + }}, + }} exists := true var lastErr error @@ -257,7 +259,7 @@ func (f *leaseTracker) createOrUpdateEndpoints(ctx context.Context, ns, n string if err != nil { lastErr = err // Do not return the error to cause a retry. - return false, nil + return false, nil //nolint:nilerr } if equality.Semantic.DeepEqual(wantSubsets, e.Subsets) { @@ -268,7 +270,7 @@ func (f *leaseTracker) createOrUpdateEndpoints(ctx context.Context, ns, n string want.Subsets = wantSubsets if _, lastErr = f.kc.CoreV1().Endpoints(ns).Update(ctx, want, metav1.UpdateOptions{}); lastErr != nil { // Do not return the error to cause a retry. - return false, nil + return false, nil //nolint:nilerr } f.logger.Infof("Bucket Endpoints %s updated with IP %s", n, f.selfIP) diff --git a/vendor/knative.dev/serving/pkg/autoscaler/statserver/server.go b/vendor/knative.dev/serving/pkg/autoscaler/statserver/server.go index 18a90c1e3..24d2255df 100644 --- a/vendor/knative.dev/serving/pkg/autoscaler/statserver/server.go +++ b/vendor/knative.dev/serving/pkg/autoscaler/statserver/server.go @@ -70,7 +70,7 @@ func New(statsServerAddr string, statsCh chan<- metrics.StatMessage, logger *zap Addr: statsServerAddr, Handler: mux, ConnState: svr.onConnStateChange, - ReadHeaderTimeout: time.Minute, //https://medium.com/a-journey-with-go/go-understand-and-mitigate-slowloris-attack-711c1b1403f6 + ReadHeaderTimeout: time.Minute, // https://medium.com/a-journey-with-go/go-understand-and-mitigate-slowloris-attack-711c1b1403f6 } return &svr } diff --git a/vendor/knative.dev/serving/pkg/gc/config.go b/vendor/knative.dev/serving/pkg/gc/config.go index 4218e6fc6..3378d4420 100644 --- a/vendor/knative.dev/serving/pkg/gc/config.go +++ b/vendor/knative.dev/serving/pkg/gc/config.go @@ -18,7 +18,9 @@ package gc import ( "context" + "errors" "fmt" + "math" "strconv" "strings" "time" @@ -111,6 +113,9 @@ func parseDisabledOrInt64(val string, toSet *int64) error { if err != nil { return err } + if parsed > math.MaxInt64 { + return fmt.Errorf("value should be lower than %v", math.MaxInt64) + } *toSet = int64(parsed) } return nil @@ -128,7 +133,7 @@ func parseDisabledOrDuration(val string, toSet *time.Duration) error { return err } if parsed < 0 { - return fmt.Errorf("must be non-negative") + return errors.New("must be non-negative") } *toSet = parsed } diff --git a/vendor/knative.dev/serving/pkg/http/request_log.go b/vendor/knative.dev/serving/pkg/http/request_log.go index 8bb86373a..1803bb7e6 100644 --- a/vendor/knative.dev/serving/pkg/http/request_log.go +++ b/vendor/knative.dev/serving/pkg/http/request_log.go @@ -23,10 +23,10 @@ import ( "net/http" "strings" "sync" + "sync/atomic" "text/template" "time" - "go.uber.org/atomic" netheader "knative.dev/networking/pkg/http/header" ) @@ -85,7 +85,8 @@ func RequestLogTemplateInputGetterFromRevision(rev *RequestLogRevision) RequestL // NewRequestLogHandler creates an http.Handler that logs request logs to an io.Writer. 
func NewRequestLogHandler(h http.Handler, w io.Writer, templateStr string, - inputGetter RequestLogTemplateInputGetter, enableProbeRequestLog bool) (*RequestLogHandler, error) { + inputGetter RequestLogTemplateInputGetter, enableProbeRequestLog bool, +) (*RequestLogHandler, error) { reqHandler := &RequestLogHandler{ handler: h, writer: w, diff --git a/vendor/knative.dev/serving/pkg/http/response_recorder.go b/vendor/knative.dev/serving/pkg/http/response_recorder.go index 079f50be2..01dbe008a 100644 --- a/vendor/knative.dev/serving/pkg/http/response_recorder.go +++ b/vendor/knative.dev/serving/pkg/http/response_recorder.go @@ -20,8 +20,7 @@ import ( "bufio" "net" "net/http" - - "go.uber.org/atomic" + "sync/atomic" "knative.dev/pkg/websocket" ) diff --git a/vendor/knative.dev/serving/pkg/metrics/tags.go b/vendor/knative.dev/serving/pkg/metrics/tags.go index bfaa3020a..ddece2c62 100644 --- a/vendor/knative.dev/serving/pkg/metrics/tags.go +++ b/vendor/knative.dev/serving/pkg/metrics/tags.go @@ -28,11 +28,9 @@ import ( "go.opencensus.io/tag" ) -var ( - // contextCache stores the metrics recorder contexts in an LRU cache. - // Hashicorp LRU cache is synchronized. - contextCache *lru.Cache -) +// contextCache stores the metrics recorder contexts in an LRU cache. +// Hashicorp LRU cache is synchronized. +var contextCache *lru.Cache // This is a fairly arbitrary number but we want it to be higher than the // number of active revisions a single activator might be handling, to avoid diff --git a/vendor/knative.dev/serving/pkg/networking/util.go b/vendor/knative.dev/serving/pkg/networking/util.go index 643fb1b06..7e1d28c85 100644 --- a/vendor/knative.dev/serving/pkg/networking/util.go +++ b/vendor/knative.dev/serving/pkg/networking/util.go @@ -2,7 +2,7 @@ package networking import ( "context" - "fmt" + "errors" "strings" "knative.dev/networking/pkg/apis/networking" @@ -22,7 +22,7 @@ func GetHTTPOption(ctx context.Context, networkConfig *netcfg.Config, annotation case netcfg.HTTPRedirected: return netv1alpha1.HTTPOptionRedirected, nil default: - return "", fmt.Errorf("incorrect http-protocol annotation: " + protocol) + return "", errors.New("incorrect http-protocol annotation: " + protocol) } } diff --git a/vendor/knative.dev/serving/pkg/queue/breaker.go b/vendor/knative.dev/serving/pkg/queue/breaker.go index 79b78a37c..918f57b74 100644 --- a/vendor/knative.dev/serving/pkg/queue/breaker.go +++ b/vendor/knative.dev/serving/pkg/queue/breaker.go @@ -21,14 +21,11 @@ import ( "errors" "fmt" "math" - - "go.uber.org/atomic" + "sync/atomic" ) -var ( - // ErrRequestQueueFull indicates the breaker queue depth was exceeded. - ErrRequestQueueFull = errors.New("pending request queue full") -) +// ErrRequestQueueFull indicates the breaker queue depth was exceeded. +var ErrRequestQueueFull = errors.New("pending request queue full") // MaxBreakerCapacity is the largest valid value for the MaxConcurrency value of BreakerParams. // This is limited by the maximum size of a chan struct{} in the current implementation. @@ -103,7 +100,7 @@ func (b *Breaker) tryAcquirePending() bool { if cur == b.totalSlots { return false } - if b.inFlight.CAS(cur, cur+1) { + if b.inFlight.CompareAndSwap(cur, cur+1) { return true } } @@ -111,7 +108,7 @@ func (b *Breaker) tryAcquirePending() bool { // releasePending releases a slot on the pending "queue". 
func (b *Breaker) releasePending() { - b.inFlight.Dec() + b.inFlight.Add(-1) } // Reserve reserves an execution slot in the breaker, to permit @@ -204,7 +201,7 @@ func (s *semaphore) tryAcquire() bool { return false } in++ - if s.state.CAS(old, pack(capacity, in)) { + if s.state.CompareAndSwap(old, pack(capacity, in)) { return true } } @@ -227,7 +224,7 @@ func (s *semaphore) acquire(ctx context.Context) error { } in++ - if s.state.CAS(old, pack(capacity, in)) { + if s.state.CompareAndSwap(old, pack(capacity, in)) { return nil } } @@ -246,7 +243,7 @@ func (s *semaphore) release() { } in-- - if s.state.CAS(old, pack(capacity, in)) { + if s.state.CompareAndSwap(old, pack(capacity, in)) { if in < capacity { select { case s.queue <- struct{}{}: @@ -265,7 +262,7 @@ func (s *semaphore) release() { // updateCapacity updates the capacity of the semaphore to the desired size. func (s *semaphore) updateCapacity(size int) { - s64 := uint64(size) + s64 := uint64(size) //nolint:gosec // TODO(dprotaso) capacity should be uint for { old := s.state.Load() capacity, in := unpack(old) @@ -275,9 +272,9 @@ func (s *semaphore) updateCapacity(size int) { return } - if s.state.CAS(old, pack(s64, in)) { + if s.state.CompareAndSwap(old, pack(s64, in)) { if s64 > capacity { - for i := uint64(0); i < s64-capacity; i++ { + for range s64 - capacity { select { case s.queue <- struct{}{}: default: @@ -293,7 +290,7 @@ func (s *semaphore) updateCapacity(size int) { // Capacity is the capacity of the semaphore. func (s *semaphore) Capacity() int { capacity, _ := unpack(s.state.Load()) - return int(capacity) + return int(capacity) //nolint:gosec // TODO(dprotaso) - capacity should be uint64 } // unpack takes an uint64 and returns two uint32 (as uint64) comprised of the leftmost diff --git a/vendor/knative.dev/serving/pkg/queue/health/handler.go b/vendor/knative.dev/serving/pkg/queue/health/handler.go index cdcab8b05..47737ca14 100644 --- a/vendor/knative.dev/serving/pkg/queue/health/handler.go +++ b/vendor/knative.dev/serving/pkg/queue/health/handler.go @@ -43,20 +43,23 @@ func ProbeHandler(prober func() bool, tracingEnabled bool) http.HandlerFunc { if ph != queue.Name { http.Error(w, badProbeTemplate+ph, http.StatusBadRequest) probeSpan.Annotate([]trace.Attribute{ - trace.StringAttribute("queueproxy.probe.error", badProbeTemplate+ph)}, "error") + trace.StringAttribute("queueproxy.probe.error", badProbeTemplate+ph), + }, "error") return } if prober == nil { http.Error(w, "no probe", http.StatusInternalServerError) probeSpan.Annotate([]trace.Attribute{ - trace.StringAttribute("queueproxy.probe.error", "no probe")}, "error") + trace.StringAttribute("queueproxy.probe.error", "no probe"), + }, "error") return } if !prober() { probeSpan.Annotate([]trace.Attribute{ - trace.StringAttribute("queueproxy.probe.error", "container not ready")}, "error") + trace.StringAttribute("queueproxy.probe.error", "container not ready"), + }, "error") w.WriteHeader(http.StatusServiceUnavailable) return } diff --git a/vendor/knative.dev/serving/pkg/queue/health/probe.go b/vendor/knative.dev/serving/pkg/queue/health/probe.go index 2aa0d6e27..fa262777a 100644 --- a/vendor/knative.dev/serving/pkg/queue/health/probe.go +++ b/vendor/knative.dev/serving/pkg/queue/health/probe.go @@ -24,6 +24,7 @@ import ( "net" "net/http" "net/url" + "strconv" "syscall" "time" @@ -105,7 +106,6 @@ func autoDowngradingTransport(opt HTTPProbeConfigOptions) http.RoundTripper { var transport = func() *http.Transport { t := http.DefaultTransport.(*http.Transport).Clone() - 
//nolint:gosec // We explicitly don't need to check certs here. t.TLSClientConfig.InsecureSkipVerify = true return t }() @@ -126,6 +126,7 @@ func http2UpgradeProbe(config HTTPProbeConfigOptions) (int, error) { if err != nil { return 0, fmt.Errorf("error constructing probe url %w", err) } + //nolint:noctx // timeout is specified on the http.Client above req, err := http.NewRequest(http.MethodOptions, url.String(), nil) if err != nil { return 0, fmt.Errorf("error constructing probe request %w", err) @@ -183,6 +184,7 @@ func HTTPProbe(config HTTPProbeConfigOptions) error { if err != nil { return fmt.Errorf("error constructing probe url %w", err) } + //nolint:noctx // timeout is specified on the http.Client above req, err := http.NewRequest(http.MethodGet, url.String(), nil) if err != nil { return fmt.Errorf("error constructing probe request %w", err) @@ -226,7 +228,6 @@ func isHTTPProbeReady(res *http.Response) bool { // GRPCProbe checks that gRPC connection can be established to the address. func GRPCProbe(config GRPCProbeConfigOptions) error { - // Use k8s.io/kubernetes/pkg/probe/dialer_others.go to correspond to OSs other than Windows dialer := &net.Dialer{ Control: func(network, address string, c syscall.RawConn) error { @@ -238,7 +239,6 @@ func GRPCProbe(config GRPCProbeConfigOptions) error { opts := []grpc.DialOption{ grpc.WithUserAgent(netheader.KubeProbeUAPrefix + config.KubeMajor + "/" + config.KubeMinor), - grpc.WithBlock(), grpc.WithTransportCredentials(insecure.NewCredentials()), // credentials are currently not supported grpc.WithContextDialer(func(ctx context.Context, addr string) (net.Conn, error) { return dialer.DialContext(ctx, "tcp", addr) @@ -249,9 +249,8 @@ func GRPCProbe(config GRPCProbeConfigOptions) error { defer cancel() - addr := net.JoinHostPort("127.0.0.1", fmt.Sprintf("%d", config.Port)) - conn, err := grpc.DialContext(ctx, addr, opts...) - + addr := net.JoinHostPort("127.0.0.1", strconv.Itoa(int(config.Port))) + conn, err := grpc.NewClient(addr, opts...) if err != nil { if errors.Is(err, context.DeadlineExceeded) { return fmt.Errorf("failed to connect service %q within %v: %w", addr, config.Timeout, err) @@ -268,7 +267,6 @@ func GRPCProbe(config GRPCProbeConfigOptions) error { resp, err := client.Check(metadata.NewOutgoingContext(ctx, make(metadata.MD)), &grpchealth.HealthCheckRequest{ Service: ptr.StringValue(config.Service), }) - if err != nil { stat, ok := status.FromError(err) if ok { diff --git a/vendor/knative.dev/serving/pkg/queue/protobuf_stats_reporter.go b/vendor/knative.dev/serving/pkg/queue/protobuf_stats_reporter.go index 3117b1b3a..5326894b7 100644 --- a/vendor/knative.dev/serving/pkg/queue/protobuf_stats_reporter.go +++ b/vendor/knative.dev/serving/pkg/queue/protobuf_stats_reporter.go @@ -18,10 +18,9 @@ package queue import ( "net/http" + "sync/atomic" "time" - "go.uber.org/atomic" - "github.com/gogo/protobuf/proto" netheader "knative.dev/networking/pkg/http/header" diff --git a/vendor/knative.dev/serving/pkg/queue/readiness/probe.go b/vendor/knative.dev/serving/pkg/queue/readiness/probe.go index b5b09636c..b0cca8d99 100644 --- a/vendor/knative.dev/serving/pkg/queue/readiness/probe.go +++ b/vendor/knative.dev/serving/pkg/queue/readiness/probe.go @@ -18,6 +18,7 @@ package readiness import ( "context" + "errors" "fmt" "io" "os" @@ -164,9 +165,9 @@ func (p *Probe) probeContainerImpl() bool { case innerProbe.Exec != nil: // Should never be reachable. Exec probes to be translated to // TCP probes when container is built. 
- return fmt.Errorf("exec probe not supported") + return errors.New("exec probe not supported") default: - return fmt.Errorf("no probe found") + return errors.New("no probe found") } }) } @@ -196,7 +197,7 @@ func (p *wrappedProbe) doProbe(probe func(time.Duration) error) error { // We'll log the lastProbeErr if we don't eventually succeed. lastProbeErr = err failCount++ - return false, nil + return false, nil //nolint:nilerr } p.count++ diff --git a/vendor/knative.dev/serving/pkg/queue/request_metric.go b/vendor/knative.dev/serving/pkg/queue/request_metric.go index 6f43a01bc..2a0a4d84d 100644 --- a/vendor/knative.dev/serving/pkg/queue/request_metric.go +++ b/vendor/knative.dev/serving/pkg/queue/request_metric.go @@ -74,7 +74,8 @@ type appRequestMetricsHandler struct { // NewRequestMetricsHandler creates an http.Handler that emits request metrics. func NewRequestMetricsHandler(next http.Handler, - ns, service, config, rev, pod string) (http.Handler, error) { + ns, service, config, rev, pod string, +) (http.Handler, error) { keys := []tag.Key{metrics.PodKey, metrics.ContainerKey, metrics.ResponseCodeKey, metrics.ResponseCodeClassKey, metrics.RouteTagKey} if err := pkgmetrics.RegisterResourceView( &view.View{ @@ -136,7 +137,8 @@ func (h *requestMetricsHandler) ServeHTTP(w http.ResponseWriter, r *http.Request // NewAppRequestMetricsHandler creates an http.Handler that emits request metrics. func NewAppRequestMetricsHandler(next http.Handler, b *Breaker, - ns, service, config, rev, pod string) (http.Handler, error) { + ns, service, config, rev, pod string, +) (http.Handler, error) { keys := []tag.Key{metrics.PodKey, metrics.ContainerKey, metrics.ResponseCodeKey, metrics.ResponseCodeClassKey} if err := pkgmetrics.RegisterResourceView(&view.View{ Description: "The number of requests that are routed to user-container", diff --git a/vendor/knative.dev/serving/pkg/reconciler/accessor/networking/certificate.go b/vendor/knative.dev/serving/pkg/reconciler/accessor/networking/certificate.go index d84317522..ef072b643 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/accessor/networking/certificate.go +++ b/vendor/knative.dev/serving/pkg/reconciler/accessor/networking/certificate.go @@ -40,8 +40,8 @@ type CertificateAccessor interface { // ReconcileCertificate reconciles Certificate to the desired status. 
func ReconcileCertificate(ctx context.Context, owner kmeta.Accessor, desired *v1alpha1.Certificate, - certAccessor CertificateAccessor) (*v1alpha1.Certificate, error) { - + certAccessor CertificateAccessor, +) (*v1alpha1.Certificate, error) { recorder := controller.GetEventRecorder(ctx) if recorder == nil { return nil, fmt.Errorf("recorder for reconciling Certificate %s/%s is not created", desired.Namespace, desired.Name) @@ -66,7 +66,6 @@ func ReconcileCertificate(ctx context.Context, owner kmeta.Accessor, desired *v1 } else if !equality.Semantic.DeepEqual(cert.Spec, desired.Spec) || !equality.Semantic.DeepEqual(cert.Annotations, desired.Annotations) || !equality.Semantic.DeepEqual(cert.Labels, desired.Labels) { - // Don't modify the informers copy existing := cert.DeepCopy() existing.Spec = desired.Spec diff --git a/vendor/knative.dev/serving/pkg/reconciler/autoscaling/reconciler.go b/vendor/knative.dev/serving/pkg/reconciler/autoscaling/reconciler.go index f7ef707df..5e9b5735b 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/autoscaling/reconciler.go +++ b/vendor/knative.dev/serving/pkg/reconciler/autoscaling/reconciler.go @@ -46,7 +46,8 @@ type Base struct { // ReconcileSKS reconciles a ServerlessService based on the given PodAutoscaler. func (c *Base) ReconcileSKS(ctx context.Context, pa *autoscalingv1alpha1.PodAutoscaler, - mode nv1alpha1.ServerlessServiceOperationMode, numActivators int32) (*nv1alpha1.ServerlessService, error) { + mode nv1alpha1.ServerlessServiceOperationMode, numActivators int32, +) (*nv1alpha1.ServerlessService, error) { logger := logging.FromContext(ctx) sksName := anames.SKS(pa.Name) diff --git a/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/target.go b/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/target.go index 658cbe440..b0efb1698 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/target.go +++ b/vendor/knative.dev/serving/pkg/reconciler/autoscaling/resources/target.go @@ -29,8 +29,7 @@ import ( // `target` is the target value of scaling metric that we autoscaler will aim for; // `total` is the maximum possible value of scaling metric that is permitted on the pod. func ResolveMetricTarget(pa *autoscalingv1alpha1.PodAutoscaler, config *autoscalerconfig.Config) (target, total float64) { - tu := 0. 
- + var tu float64 switch pa.Metric() { case autoscaling.RPS: total = config.RPSTargetDefault diff --git a/vendor/knative.dev/serving/pkg/reconciler/configuration/configuration.go b/vendor/knative.dev/serving/pkg/reconciler/configuration/configuration.go index 3de2842e7..16350cd27 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/configuration/configuration.go +++ b/vendor/knative.dev/serving/pkg/reconciler/configuration/configuration.go @@ -116,7 +116,6 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, config *v1.Configuration config.Status.MarkLatestCreatedFailed(lcr.Name, rc.GetMessage()) if !equality.Semantic.DeepEqual(beforeReady, config.Status.GetCondition(v1.ConfigurationConditionReady)) { - if lcr.Name == config.Status.LatestReadyRevisionName { recorder.Eventf(config, corev1.EventTypeWarning, "LatestReadyFailed", "Latest ready revision %q has failed", lcr.Name) diff --git a/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/revision.go b/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/revision.go index 62ee9ff36..8783e75d0 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/revision.go +++ b/vendor/knative.dev/serving/pkg/reconciler/configuration/resources/revision.go @@ -19,6 +19,7 @@ package resources import ( "context" "fmt" + "strconv" "time" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -104,7 +105,7 @@ func RevisionLabelValueForKey(key string, config metav1.Object) string { case serving.ServiceLabelKey: return config.GetLabels()[serving.ServiceLabelKey] case serving.ConfigurationGenerationLabelKey: - return fmt.Sprint(config.GetGeneration()) + return strconv.FormatInt(config.GetGeneration(), 10) case serving.ConfigurationUIDLabelKey: return string(config.GetUID()) case serving.ServiceUIDLabelKey: diff --git a/vendor/knative.dev/serving/pkg/reconciler/domainmapping/reconciler.go b/vendor/knative.dev/serving/pkg/reconciler/domainmapping/reconciler.go index 3a9dbfc23..ef6c4d371 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/domainmapping/reconciler.go +++ b/vendor/knative.dev/serving/pkg/reconciler/domainmapping/reconciler.go @@ -263,7 +263,6 @@ func (r *Reconciler) reconcileIngress(ctx context.Context, dm *v1beta1.DomainMap } else if !equality.Semantic.DeepEqual(ingress.Spec, desired.Spec) || !equality.Semantic.DeepEqual(ingress.Annotations, desired.Annotations) || !equality.Semantic.DeepEqual(ingress.Labels, desired.Labels) { - // Don't modify the informers copy origin := ingress.DeepCopy() origin.Spec = desired.Spec diff --git a/vendor/knative.dev/serving/pkg/reconciler/gc/gc.go b/vendor/knative.dev/serving/pkg/reconciler/gc/gc.go index 58cc74778..169849fdd 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/gc/gc.go +++ b/vendor/knative.dev/serving/pkg/reconciler/gc/gc.go @@ -40,7 +40,8 @@ func collect( ctx context.Context, client clientset.Interface, revisionLister listers.RevisionLister, - config *v1.Configuration) pkgreconciler.Event { + config *v1.Configuration, +) pkgreconciler.Event { cfg := configns.FromContext(ctx).RevisionGC logger := logging.FromContext(ctx) @@ -75,7 +76,7 @@ func collect( // If we need `min` to remain, this is the max count of rev can delete. 
maxIdx := len(revs) - min staleCount := 0 - for i := 0; i < count; i++ { + for i := range count { rev := revs[i] if !isRevisionStale(cfg, rev, logger) { continue @@ -89,7 +90,6 @@ func collect( if staleCount >= maxIdx { return nil // Reaches max revs to delete } - } nonStaleCount := count - staleCount diff --git a/vendor/knative.dev/serving/pkg/reconciler/labeler/accessors.go b/vendor/knative.dev/serving/pkg/reconciler/labeler/accessors.go index 36782ca84..dce202bb1 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/labeler/accessors.go +++ b/vendor/knative.dev/serving/pkg/reconciler/labeler/accessors.go @@ -62,7 +62,8 @@ func newRevisionAccessor( tracker tracker.Interface, lister listers.RevisionLister, indexer cache.Indexer, - clock clock.PassiveClock) *revisionAccessor { + clock clock.PassiveClock, +) *revisionAccessor { return &revisionAccessor{ client: client, tracker: tracker, @@ -74,7 +75,8 @@ func newRevisionAccessor( // makeMetadataPatch makes a metadata map to be patched or nil if no changes are needed. func makeMetadataPatch( - acc kmeta.Accessor, routeName string, addRoutingState, remove bool, clock clock.PassiveClock) (map[string]interface{}, error) { + acc kmeta.Accessor, routeName string, addRoutingState, remove bool, clock clock.PassiveClock, +) (map[string]interface{}, error) { labels := map[string]interface{}{} annotations := map[string]interface{}{} @@ -99,7 +101,6 @@ func makeMetadataPatch( // markRoutingState updates the RoutingStateLabel and bumps the modified time annotation. func markRoutingState(acc kmeta.Accessor, clock clock.PassiveClock, diffLabels, diffAnn map[string]interface{}) { - hasRoute := acc.GetAnnotations()[serving.RoutesAnnotationKey] != "" if val, has := diffAnn[serving.RoutesAnnotationKey]; has { hasRoute = val != nil @@ -196,7 +197,8 @@ func newConfigurationAccessor( tracker tracker.Interface, lister listers.ConfigurationLister, indexer cache.Indexer, - clock clock.PassiveClock) *configurationAccessor { + clock clock.PassiveClock, +) *configurationAccessor { return &configurationAccessor{ client: client, tracker: tracker, diff --git a/vendor/knative.dev/serving/pkg/reconciler/labeler/labeler.go b/vendor/knative.dev/serving/pkg/reconciler/labeler/labeler.go index d1983c9f4..1c343e70f 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/labeler/labeler.go +++ b/vendor/knative.dev/serving/pkg/reconciler/labeler/labeler.go @@ -31,8 +31,10 @@ type Reconciler struct { } // Check that our Reconciler implements routereconciler.Interface -var _ routereconciler.Interface = (*Reconciler)(nil) -var _ routereconciler.Finalizer = (*Reconciler)(nil) +var ( + _ routereconciler.Interface = (*Reconciler)(nil) + _ routereconciler.Finalizer = (*Reconciler)(nil) +) // FinalizeKind removes all Route reference metadata from its traffic targets. // This does not modify or observe spec for the Route itself. 
diff --git a/vendor/knative.dev/serving/pkg/reconciler/nscert/nscert.go b/vendor/knative.dev/serving/pkg/reconciler/nscert/nscert.go index 883781061..9d11b0b84 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/nscert/nscert.go +++ b/vendor/knative.dev/serving/pkg/reconciler/nscert/nscert.go @@ -53,6 +53,8 @@ type reconciler struct { // Check that our Reconciler implements namespacereconciler.Interface var _ namespacereconciler.Interface = (*reconciler)(nil) + +// precompile domain template regexp var domainTemplateRegex = regexp.MustCompile(`^\*\..+$`) func certClass(ctx context.Context, r *corev1.Namespace) string { diff --git a/vendor/knative.dev/serving/pkg/reconciler/revision/background.go b/vendor/knative.dev/serving/pkg/reconciler/revision/background.go index 0bef35cba..a53a0f2f0 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/revision/background.go +++ b/vendor/knative.dev/serving/pkg/reconciler/revision/background.go @@ -42,7 +42,7 @@ type backgroundResolver struct { resolver imageResolver enqueue func(types.NamespacedName) - queue workqueue.RateLimitingInterface + queue workqueue.TypedRateLimitingInterface[any] mu sync.RWMutex results map[types.NamespacedName]*resolveResult @@ -78,7 +78,7 @@ type workItem struct { image string } -func newBackgroundResolver(logger *zap.SugaredLogger, resolver imageResolver, queue workqueue.RateLimitingInterface, enqueue func(types.NamespacedName)) *backgroundResolver { +func newBackgroundResolver(logger *zap.SugaredLogger, resolver imageResolver, queue workqueue.TypedRateLimitingInterface[any], enqueue func(types.NamespacedName)) *backgroundResolver { r := &backgroundResolver{ logger: logger, @@ -100,7 +100,7 @@ func (r *backgroundResolver) Start(stop <-chan struct{}, maxInFlight int) (done // Run the worker threads. wg.Add(maxInFlight) - for i := 0; i < maxInFlight; i++ { + for range maxInFlight { go func() { defer wg.Done() for { diff --git a/vendor/knative.dev/serving/pkg/reconciler/revision/controller.go b/vendor/knative.dev/serving/pkg/reconciler/revision/controller.go index 614707b6d..70d525836 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/revision/controller.go +++ b/vendor/knative.dev/serving/pkg/reconciler/revision/controller.go @@ -120,11 +120,11 @@ func newControllerWithOptions( userAgent := fmt.Sprintf("knative/%s (serving)", changeset.Get()) - digestResolveQueue := workqueue.NewNamedRateLimitingQueue(workqueue.NewMaxOfRateLimiter( + digestResolveQueue := workqueue.NewTypedRateLimitingQueueWithConfig(workqueue.NewTypedMaxOfRateLimiter( newItemExponentialFailureRateLimiter(1*time.Second, 1000*time.Second), // 10 qps, 100 bucket size. 
This is only for retry speed and its only the overall factor (not per item) - &workqueue.BucketRateLimiter{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, - ), "digests") + &workqueue.TypedBucketRateLimiter[any]{Limiter: rate.NewLimiter(rate.Limit(10), 100)}, + ), workqueue.TypedRateLimitingQueueConfig[any]{Name: "digests"}) resolver := newBackgroundResolver(logger, &digestResolver{client: kubeclient.Get(ctx), transport: transport, userAgent: userAgent}, digestResolveQueue, impl.EnqueueKey) resolver.Start(ctx.Done(), digestResolutionWorkers) diff --git a/vendor/knative.dev/serving/pkg/reconciler/revision/cruds.go b/vendor/knative.dev/serving/pkg/reconciler/revision/cruds.go index 0943b3cbc..a8eb1852a 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/revision/cruds.go +++ b/vendor/knative.dev/serving/pkg/reconciler/revision/cruds.go @@ -38,7 +38,6 @@ func (c *Reconciler) createDeployment(ctx context.Context, rev *v1.Revision) (*a cfgs := config.FromContext(ctx) deployment, err := resources.MakeDeployment(rev, cfgs) - if err != nil { return nil, fmt.Errorf("failed to make deployment: %w", err) } diff --git a/vendor/knative.dev/serving/pkg/reconciler/revision/rate_limiter.go b/vendor/knative.dev/serving/pkg/reconciler/revision/rate_limiter.go index b4b1c3f31..6ed28b66a 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/revision/rate_limiter.go +++ b/vendor/knative.dev/serving/pkg/reconciler/revision/rate_limiter.go @@ -39,9 +39,9 @@ type itemExponentialFailureRateLimiter struct { maxDelay time.Duration } -var _ workqueue.RateLimiter = &itemExponentialFailureRateLimiter{} +var _ workqueue.TypedRateLimiter[any] = &itemExponentialFailureRateLimiter{} -func newItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) workqueue.RateLimiter { +func newItemExponentialFailureRateLimiter(baseDelay time.Duration, maxDelay time.Duration) workqueue.TypedRateLimiter[any] { return &itemExponentialFailureRateLimiter{ failures: map[interface{}]int{}, baseDelay: baseDelay, diff --git a/vendor/knative.dev/serving/pkg/reconciler/revision/resolve.go b/vendor/knative.dev/serving/pkg/reconciler/revision/resolve.go index 16aeeae86..dc967a00a 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/revision/resolve.go +++ b/vendor/knative.dev/serving/pkg/reconciler/revision/resolve.go @@ -65,6 +65,7 @@ func newResolverTransport(path string, maxIdleConns, maxIdleConnsPerHost int) (* transport := http.DefaultTransport.(*http.Transport).Clone() transport.MaxIdleConns = maxIdleConns transport.MaxIdleConnsPerHost = maxIdleConnsPerHost + //nolint:gosec // quay.io still required 1.2 - bump if they've moved up transport.TLSClientConfig = &tls.Config{ MinVersion: tlsMinVersionFromEnv(tls.VersionTLS12), RootCAs: pool, @@ -91,7 +92,8 @@ func (r *digestResolver) Resolve( ctx context.Context, image string, opt k8schain.Options, - registriesToSkip sets.Set[string]) (string, error) { + registriesToSkip sets.Set[string], +) (string, error) { kc, err := k8schain.New(ctx, r.client, opt) if err != nil { return "", fmt.Errorf("failed to initialize authentication: %w", err) diff --git a/vendor/knative.dev/serving/pkg/reconciler/revision/resources/deploy.go b/vendor/knative.dev/serving/pkg/reconciler/revision/resources/deploy.go index f62c5514f..aba5c877b 100644 --- a/vendor/knative.dev/serving/pkg/reconciler/revision/resources/deploy.go +++ b/vendor/knative.dev/serving/pkg/reconciler/revision/resources/deploy.go @@ -58,7 +58,6 @@ var ( SubPathExpr: "$(K_INTERNAL_POD_NAMESPACE)_$(K_INTERNAL_POD_NAME)_", } 
-	//nolint:gosec // Volume, not hardcoded credentials
 	varTokenVolume = corev1.Volume{
 		Name: "knative-token-volume",
 		VolumeSource: corev1.VolumeSource{
@@ -74,7 +73,6 @@ var (
 		ReadOnly:  true,
 	}
 
-	//nolint:gosec // VolumeMount, not hardcoded credentials
 	varTokenVolumeMount = corev1.VolumeMount{
 		Name:      varTokenVolume.Name,
 		MountPath: queue.TokenDirectory,
diff --git a/vendor/knative.dev/serving/pkg/reconciler/revision/resources/queue.go b/vendor/knative.dev/serving/pkg/reconciler/revision/resources/queue.go
index 0483111f6..1ef7f58e4 100644
--- a/vendor/knative.dev/serving/pkg/reconciler/revision/resources/queue.go
+++ b/vendor/knative.dev/serving/pkg/reconciler/revision/resources/queue.go
@@ -330,7 +330,6 @@ func makeQueueContainer(rev *v1.Revision, cfg *config.Config) (*corev1.Container
 		if err != nil {
 			return nil, fmt.Errorf("failed to serialize multiple readiness probes: %w", err)
 		}
-
 	} else if userContainerReadinessProbe != nil {
 		readinessProbeJSON, err = readiness.EncodeSingleProbe(userContainerReadinessProbe)
 		if err != nil {
diff --git a/vendor/knative.dev/serving/pkg/reconciler/route/config/domain.go b/vendor/knative.dev/serving/pkg/reconciler/route/config/domain.go
index 2c67a9cbe..6154a0a0e 100644
--- a/vendor/knative.dev/serving/pkg/reconciler/route/config/domain.go
+++ b/vendor/knative.dev/serving/pkg/reconciler/route/config/domain.go
@@ -35,11 +35,9 @@ const (
 	DomainTypeWildcard = "wildcard"
 )
 
-var (
-	// DefaultDomain holds the domain that Route's live under by default
-	// when no label selector-based options apply.
-	DefaultDomain = "svc." + network.GetClusterDomainName()
-)
+// DefaultDomain holds the domain that Route's live under by default
+// when no label selector-based options apply.
+var DefaultDomain = "svc." + network.GetClusterDomainName()
 
 // LabelSelector represents map of {key,value} pairs. A single {key,value} in the
 // map is equivalent to a requirement key == value. The requirements are ANDed.
@@ -131,7 +129,6 @@ func (c *Domain) LookupDomainForLabels(labels map[string]string) string {
 		return "svc." + network.GetClusterDomainName()
 	}
 	for k, v := range c.Domains {
-
 		// Ignore if selector doesn't match, or decrease the specificity.
 		if !v.Selector.Matches(labels) || v.Selector.specificity() < specificity {
 			continue
diff --git a/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains.go b/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains.go
index 843f2f538..50cce3e56 100644
--- a/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains.go
+++ b/vendor/knative.dev/serving/pkg/reconciler/route/domains/domains.go
@@ -43,9 +43,7 @@ import (
 // HTTPScheme is the string representation of http.
 const HTTPScheme string = "http"
 
-var (
-	ErrorDomainName = errors.New("domain name error")
-)
+var ErrDomainName = errors.New("domain name error")
 
 // GetAllDomainsAndTags returns all of the domains and tags(including subdomains) associated with a Route
 func GetAllDomainsAndTags(ctx context.Context, r *v1.Route, names []string, visibility map[string]netv1alpha1.IngressVisibility) (map[string]string, error) {
@@ -124,12 +122,12 @@ func DomainNameFromTemplate(ctx context.Context, r metav1.ObjectMeta, name strin
 	}
 
 	if err := templ.Execute(&buf, data); err != nil {
-		return "", fmt.Errorf("%w: error executing the DomainTemplate: %w", ErrorDomainName, err)
+		return "", fmt.Errorf("%w: error executing the DomainTemplate: %w", ErrDomainName, err)
 	}
 
 	urlErrs := validation.IsFullyQualifiedDomainName(field.NewPath("url"), buf.String())
 	if urlErrs != nil {
-		return "", fmt.Errorf("%w: invalid domain name %q: %w", ErrorDomainName, buf.String(), urlErrs.ToAggregate())
+		return "", fmt.Errorf("%w: invalid domain name %q: %w", ErrDomainName, buf.String(), urlErrs.ToAggregate())
 	}
 
 	return buf.String(), nil
@@ -152,7 +150,7 @@ func HostnameFromTemplate(ctx context.Context, name, tag string) (string, error)
 	networkConfig := config.FromContext(ctx).Network
 	buf := bytes.Buffer{}
 	if err := networkConfig.GetTagTemplate().Execute(&buf, data); err != nil {
-		return "", fmt.Errorf("%w: error executing the TagTemplate: %w", ErrorDomainName, err)
+		return "", fmt.Errorf("%w: error executing the TagTemplate: %w", ErrDomainName, err)
 	}
 	return buf.String(), nil
 }
diff --git a/vendor/knative.dev/serving/pkg/reconciler/route/reconcile_resources.go b/vendor/knative.dev/serving/pkg/reconciler/route/reconcile_resources.go
index c028be315..41799c237 100644
--- a/vendor/knative.dev/serving/pkg/reconciler/route/reconcile_resources.go
+++ b/vendor/knative.dev/serving/pkg/reconciler/route/reconcile_resources.go
@@ -83,7 +83,6 @@ func (c *Reconciler) reconcileIngress(
 	if !equality.Semantic.DeepEqual(ingress.Spec, desired.Spec) ||
 		!equality.Semantic.DeepEqual(ingress.Annotations, desired.Annotations) ||
 		!equality.Semantic.DeepEqual(ingress.Labels, desired.Labels) {
-
 		// It is notable that one reason for differences here may be defaulting.
 		// When that is the case, the Update will end up being a nop because the
 		// webhook will bring them into alignment and no new reconciliation will occur.
@@ -119,7 +118,6 @@ func (c *Reconciler) deleteOrphanedServices(ctx context.Context, r *v1.Route, ac
 	routeLabelSelector := labels.SelectorFromSet(labels.Set{serving.RouteLabelKey: r.Name})
 
 	allServices, err := c.serviceLister.Services(ns).List(routeLabelSelector)
-
 	if err != nil {
 		return fmt.Errorf("failed to fetch existing services: %w", err)
 	}
@@ -222,7 +220,6 @@ func (c *Reconciler) updatePlaceholderServices(ctx context.Context, route *v1.Ro
 	eg, egCtx := errgroup.WithContext(ctx)
 	for _, from := range pairs {
-		from := from
 		eg.Go(func() error {
 			to, err := resources.MakeK8sService(egCtx, route, from.Tag, ingress, resources.IsClusterLocalService(from.Service))
 			if err != nil {
@@ -253,7 +250,6 @@ func (c *Reconciler) updatePlaceholderServices(ctx context.Context, route *v1.Ro
 			// else:
 			//   clusterIPs are immutable thus any transition requires a recreate
 			//   ie. "None" <=> "" (blank - request an IP)
-
 		} else /* types are the same and not clusterIP */ {
 			canUpdate = true
 		}
@@ -329,7 +325,8 @@ func deserializeRollout(ctx context.Context, ro string) *traffic.Rollout {
 
 func (c *Reconciler) reconcileRollout(
 	ctx context.Context, r *v1.Route, tc *traffic.Config,
-	ingress *netv1alpha1.Ingress) *traffic.Rollout {
+	ingress *netv1alpha1.Ingress,
+) *traffic.Rollout {
 	cfg := config.FromContext(ctx)
 
 	// Is there rollout duration specified?
@@ -344,7 +341,6 @@ func (c *Reconciler) reconcileRollout(
 		return curRO
 	}
 	// Get the current rollout state as described by the traffic.
-
 	nextStepTime := int64(0)
 	logger := logging.FromContext(ctx).Desugar().With(
 		zap.Int("durationSecs", rd))
 	logger.Debug("Rollout is enabled. Stepping from previous state.")
diff --git a/vendor/knative.dev/serving/pkg/reconciler/route/resources/ingress.go b/vendor/knative.dev/serving/pkg/reconciler/route/resources/ingress.go
index 2f7f7a60d..ff8d2dc81 100644
--- a/vendor/knative.dev/serving/pkg/reconciler/route/resources/ingress.go
+++ b/vendor/knative.dev/serving/pkg/reconciler/route/resources/ingress.go
@@ -233,7 +233,8 @@ func makeIngressRule(domains sets.Set[string], ns string,
 	visibility netv1alpha1.IngressVisibility,
 	targets traffic.RevisionTargets,
 	roCfgs []*traffic.ConfigurationRollout,
-	encryption bool) netv1alpha1.IngressRule {
+	encryption bool,
+) netv1alpha1.IngressRule {
 	return netv1alpha1.IngressRule{
 		Hosts:      sets.List(domains),
 		Visibility: visibility,
@@ -270,7 +271,8 @@ func rolloutConfig(cfgName string, ros []*traffic.ConfigurationRollout) *traffic
 }
 
 func makeBaseIngressPath(ns string, targets traffic.RevisionTargets,
-	roCfgs []*traffic.ConfigurationRollout, encryption bool) *netv1alpha1.HTTPIngressPath {
+	roCfgs []*traffic.ConfigurationRollout, encryption bool,
+) *netv1alpha1.HTTPIngressPath {
 	// Optimistically allocate |targets| elements.
 	splits := make([]netv1alpha1.IngressBackendSplit, 0, len(targets))
 	for _, t := range targets {
diff --git a/vendor/knative.dev/serving/pkg/reconciler/route/route.go b/vendor/knative.dev/serving/pkg/reconciler/route/route.go
index fd391bb81..ecdd60e06 100644
--- a/vendor/knative.dev/serving/pkg/reconciler/route/route.go
+++ b/vendor/knative.dev/serving/pkg/reconciler/route/route.go
@@ -120,7 +120,7 @@ func (c *Reconciler) ReconcileKind(ctx context.Context, r *v1.Route) pkgreconcil
 	traffic, err := c.configureTraffic(ctx, r)
 	if traffic == nil || err != nil {
 		if err != nil {
-			if errors.Is(err, domains.ErrorDomainName) {
+			if errors.Is(err, domains.ErrDomainName) {
 				r.Status.MarkRevisionTargetTrafficError(errorConfigMsg, err.Error())
 			} else {
 				r.Status.MarkUnknownTrafficError(err.Error())
@@ -276,11 +276,11 @@ func (c *Reconciler) externalDomainTLS(ctx context.Context, host string, r *v1.R
 		if renewingCondition.Status == corev1.ConditionTrue {
 			logger.Infof("Renewing Condition detected on Cert (%s), will attempt creating new challenges.", cert.Name)
 			if len(cert.Status.HTTP01Challenges) == 0 {
-				//Not sure log level this should be at.
-				//It is possible for certs to be renewed without getting
-				//validated again, for example, LetsEncrypt will cache
-				//validation results. See
-				//[here](https://letsencrypt.org/docs/faq/#i-successfully-renewed-a-certificate-but-validation-didn-t-happen-this-time-how-is-that-possible)
+				// Not sure log level this should be at.
+				// It is possible for certs to be renewed without getting
+				// validated again, for example, LetsEncrypt will cache
+				// validation results. See
+				// [here](https://letsencrypt.org/docs/faq/#i-successfully-renewed-a-certificate-but-validation-didn-t-happen-this-time-how-is-that-possible)
 				logger.Infof("No HTTP01Challenges found on Cert (%s).", cert.Name)
 			}
 			acmeChallenges = append(acmeChallenges, cert.Status.HTTP01Challenges...)
diff --git a/vendor/knative.dev/serving/pkg/reconciler/route/traffic/traffic.go b/vendor/knative.dev/serving/pkg/reconciler/route/traffic/traffic.go
index bd24754f1..13a187402 100644
--- a/vendor/knative.dev/serving/pkg/reconciler/route/traffic/traffic.go
+++ b/vendor/knative.dev/serving/pkg/reconciler/route/traffic/traffic.go
@@ -79,7 +79,8 @@ type Config struct {
 //
 // In the case that some target is missing, an error of type TargetError will be returned.
 func BuildTrafficConfiguration(configLister listers.ConfigurationLister, revLister listers.RevisionLister,
-	r *v1.Route) (*Config, error) {
+	r *v1.Route,
+) (*Config, error) {
 	return newBuilder(configLister, revLister, r).build()
 }
 
@@ -118,7 +119,8 @@ func (cfg *Config) computeURL(ctx context.Context, r *v1.Route, tt *RevisionTarg
 }
 
 func (cfg *Config) targetToStatus(ctx context.Context, r *v1.Route, tt *RevisionTarget,
-	revs []RevisionRollout, results []v1.TrafficTarget) (_ []v1.TrafficTarget, err error) {
+	revs []RevisionRollout, results []v1.TrafficTarget,
+) (_ []v1.TrafficTarget, err error) {
 	var url *apis.URL
 	// Do this once per tag.
 	if tt.Tag != "" {
@@ -207,7 +209,8 @@ type configBuilder struct {
 func newBuilder(
 	configLister listers.ConfigurationLister, revLister listers.RevisionLister,
-	r *v1.Route) *configBuilder {
+	r *v1.Route,
+) *configBuilder {
 	return &configBuilder{
 		configLister: configLister.Configurations(r.Namespace),
 		revLister:    revLister.Revisions(r.Namespace),
diff --git a/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/controller.go b/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/controller.go
index f6b7530bc..ad1d63a9a 100644
--- a/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/controller.go
+++ b/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/controller.go
@@ -43,7 +43,6 @@ func NewController(
 	ctx context.Context,
 	cmw configmap.Watcher,
 ) *controller.Impl {
-
 	logger := logging.FromContext(ctx)
 	serviceInformer := serviceinformer.Get(ctx)
 	endpointsInformer := endpointsinformer.Get(ctx)
diff --git a/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/resources/services.go b/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/resources/services.go
index 5672fc12e..3292a9b88 100644
--- a/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/resources/services.go
+++ b/vendor/knative.dev/serving/pkg/reconciler/serverlessservice/resources/services.go
@@ -63,8 +63,9 @@ func makePublicServicePorts(sks *v1alpha1.ServerlessService) []corev1.ServicePor
 		Name:        pkgnet.ServicePortName(sks.Spec.ProtocolType),
 		Protocol:    corev1.ProtocolTCP,
 		AppProtocol: pkgnet.AppProtocol(sks.Spec.ProtocolType),
-		Port:        int32(pkgnet.ServicePort(sks.Spec.ProtocolType)),
-		TargetPort:  targetPort(sks),
+		//nolint:gosec //ignore integer overflow since pkgnet is bounded
+		Port:        int32(pkgnet.ServicePort(sks.Spec.ProtocolType)),
+		TargetPort:  targetPort(sks),
 	}, {
 		// The HTTPS port is used when activator-ca is enabled.
 		// Although it is not used by default, we put it here as it should be harmless
diff --git a/vendor/knative.dev/serving/pkg/reconciler/service/service.go b/vendor/knative.dev/serving/pkg/reconciler/service/service.go
index d74a3ac6b..d1cffe4f3 100644
--- a/vendor/knative.dev/serving/pkg/reconciler/service/service.go
+++ b/vendor/knative.dev/serving/pkg/reconciler/service/service.go
@@ -55,7 +55,8 @@ type Reconciler struct {
 // NewReconciler creates the reference to the Reconciler based on clientset.Interface, listers.ConfigurationLister,
 // listers.RevisionLister and listers.RouteLister.
 func NewReconciler(client clientset.Interface, configurationLister listers.ConfigurationLister,
-	revisionLister listers.RevisionLister, routeLister listers.RouteLister) *Reconciler {
+	revisionLister listers.RevisionLister, routeLister listers.RouteLister,
+) *Reconciler {
 	return &Reconciler{
 		client:              client,
 		configurationLister: configurationLister,
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 06aab3327..154c6a332 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1279,7 +1279,7 @@ knative.dev/pkg/version
 knative.dev/pkg/webhook
 knative.dev/pkg/webhook/certificates/resources
 knative.dev/pkg/websocket
-# knative.dev/serving v0.43.1-0.20250113163001-3e6d275e2bc4
+# knative.dev/serving v0.43.1-0.20250115103709-b484fa275687
 ## explicit; go 1.22.7
 knative.dev/serving/pkg/activator
 knative.dev/serving/pkg/apis/autoscaling
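The hunks in domains.go and route.go rename the exported sentinel from ErrorDomainName to ErrDomainName (the conventional Err prefix) and wrap it together with the underlying cause using two %w verbs, while ReconcileKind keeps detecting it with errors.Is. Below is a minimal, self-contained sketch of that pattern, separate from the patch itself; the names errDomainName and buildDomain are illustrative stand-ins, not the vendored identifiers.

// sketch.go: illustrates sentinel wrapping with multiple %w verbs and errors.Is matching.
package main

import (
	"errors"
	"fmt"
)

// errDomainName mirrors the renamed sentinel (ErrDomainName in the vendored package).
var errDomainName = errors.New("domain name error")

// buildDomain mimics the wrapping in DomainNameFromTemplate: the sentinel and the
// underlying cause are both wrapped with %w, so callers can match either one.
func buildDomain(name string) (string, error) {
	if name == "" {
		cause := errors.New("template produced an empty host")
		return "", fmt.Errorf("%w: error executing the DomainTemplate: %w", errDomainName, cause)
	}
	return name, nil
}

func main() {
	_, err := buildDomain("")
	// errors.Is walks the whole wrap chain, so the sentinel is still detected even
	// though the error also wraps the underlying template failure.
	fmt.Println(errors.Is(err, errDomainName)) // prints: true
}

Wrapping more than one error with %w in a single fmt.Errorf call requires Go 1.20 or later, which is consistent with the go 1.22.7 directive recorded for the module in vendor/modules.txt.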