qid: 13
using wait.PollUntilContextTimeout instead of deprecated wait.Poll for pkg/scheduler
using wait.PollUntilContextTimeout instead of deprecated wait.Poll for test/integration/scheduler
using wait.PollUntilContextTimeout instead of deprecated wait.Poll for test/e2e/scheduling
using wait.ConditionWithContextFunc for PodScheduled/PodIsGettingEvicted/PodScheduledIn/PodUnschedulable/PodSchedulingError
date: 2023-08-18
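The change applied across the diff below is mechanical: every `wait.Poll` / `wait.PollImmediate` call becomes `wait.PollUntilContextTimeout`, and each condition closure gains a context parameter. A minimal before/after sketch (`waitForReady` and its `check` argument are hypothetical, for illustration only):

```go
package example

import (
	"context"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// waitForReady is a hypothetical helper showing the migration pattern.
func waitForReady(ctx context.Context, check func() (bool, error)) error {
	// Deprecated form: no context, so the loop cannot observe cancellation.
	//   err := wait.Poll(time.Second, 30*time.Second, func() (bool, error) {
	//       return check()
	//   })

	// Replacement: the loop stops on ctx cancellation or after the timeout.
	// The fourth argument ("immediate") is false for converted wait.Poll call
	// sites and true for the converted wait.PollImmediate call sites below.
	return wait.PollUntilContextTimeout(ctx, time.Second, 30*time.Second, false,
		func(ctx context.Context) (bool, error) {
			return check()
		})
}
```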
diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder.go b/pkg/scheduler/framework/plugins/volumebinding/binder.go
index b8afe554ca8..d035b16721b 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/binder.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/binder.go
@@ -523,7 +523,7 @@ func (b *volumeBinder) BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, p
return err
}
- err = wait.Poll(time.Second, b.bindTimeout, func() (bool, error) {
+ err = wait.PollUntilContextTimeout(ctx, time.Second, b.bindTimeout, false, func(ctx context.Context) (bool, error) {
b, err := b.checkBindings(assumedPod, bindings, claimsToProvision)
return b, err
})
diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go
index 7c8661045b1..5d7fdca1a1b 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go
@@ -349,7 +349,7 @@ func (env *testEnv) updateVolumes(ctx context.Context, pvs []*v1.PersistentVolum
}
pvs[i] = newPv
}
- return wait.Poll(100*time.Millisecond, 3*time.Second, func() (bool, error) {
+ return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 3*time.Second, false, func(ctx context.Context) (bool, error) {
for _, pv := range pvs {
obj, err := env.internalPVCache.GetAPIObj(pv.Name)
if obj == nil || err != nil {
@@ -375,7 +375,7 @@ func (env *testEnv) updateClaims(ctx context.Context, pvcs []*v1.PersistentVolum
}
pvcs[i] = newPvc
}
- return wait.Poll(100*time.Millisecond, 3*time.Second, func() (bool, error) {
+ return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 3*time.Second, false, func(ctx context.Context) (bool, error) {
for _, pvc := range pvcs {
obj, err := env.internalPVCCache.GetAPIObj(getPVCName(pvc))
if obj == nil || err != nil {
diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go
index 1321f10cb1c..946c9a757d0 100644
--- a/test/e2e/scheduling/limit_range.go
+++ b/test/e2e/scheduling/limit_range.go
@@ -177,7 +177,7 @@ var _ = SIGDescribe("LimitRange", func() {
framework.ExpectNoError(err)
ginkgo.By("Verifying LimitRange updating is effective")
- err = wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
+ err = wait.PollUntilContextTimeout(ctx, time.Second*2, time.Second*20, false, func(ctx context.Context) (bool, error) {
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(ctx, limitRange.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
@@ -199,7 +199,7 @@ var _ = SIGDescribe("LimitRange", func() {
framework.ExpectNoError(err)
ginkgo.By("Verifying the LimitRange was deleted")
- err = wait.Poll(time.Second*5, e2eservice.RespondingTimeout, func() (bool, error) {
+ err = wait.PollUntilContextTimeout(ctx, time.Second*5, e2eservice.RespondingTimeout, false, func(ctx context.Context) (bool, error) {
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go
index 5fccfb52e91..6ae213dd16d 100644
--- a/test/e2e/scheduling/preemption.go
+++ b/test/e2e/scheduling/preemption.go
@@ -728,7 +728,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
// - if it's less than expected replicas, it denotes its pods are under-preempted
// "*2" means pods of ReplicaSet{1,2} are expected to be only preempted once.
expectedRSPods := []int32{1 * 2, 1 * 2, 1}
- err := wait.Poll(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PollShortTimeout, false, func(ctx context.Context) (bool, error) {
for i := 0; i < len(podNamesSeen); i++ {
got := atomic.LoadInt32(&podNamesSeen[i])
if got < expectedRSPods[i] {
@@ -905,7 +905,7 @@ func createPod(ctx context.Context, f *framework.Framework, conf pausePodConfig)
// waitForPreemptingWithTimeout verifies if 'pod' is preempting within 'timeout', specifically it checks
// if the 'spec.NodeName' field of preemptor 'pod' has been set.
func waitForPreemptingWithTimeout(ctx context.Context, f *framework.Framework, pod *v1.Pod, timeout time.Duration) {
- err := wait.Poll(2*time.Second, timeout, func() (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, false, func(ctx context.Context) (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
diff --git a/test/integration/node/lifecycle_test.go b/test/integration/node/lifecycle_test.go
index 5346c29e01e..e9451d319c5 100644
--- a/test/integration/node/lifecycle_test.go
+++ b/test/integration/node/lifecycle_test.go
@@ -154,7 +154,7 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
t.Errorf("Failed to taint node in test %s <%s>, err: %v", name, nodes[nodeIndex].Name, err)
}
- err = wait.PollImmediate(time.Second, time.Second*20, testutils.PodIsGettingEvicted(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, time.Second*20, true, testutils.PodIsGettingEvicted(cs, testPod.Namespace, testPod.Name))
if err != nil {
t.Fatalf("Error %q in test %q when waiting for terminating pod: %q", err, name, klog.KObj(testPod))
}
diff --git a/test/integration/podgc/podgc_test.go b/test/integration/podgc/podgc_test.go
index 799f7f26273..236e350762d 100644
--- a/test/integration/podgc/podgc_test.go
+++ b/test/integration/podgc/podgc_test.go
@@ -148,7 +148,7 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
if err != nil {
t.Fatalf("Failed to delete node: %v, err: %v", pod.Spec.NodeName, err)
}
- err = wait.PollImmediate(time.Second, time.Second*15, testutils.PodIsGettingEvicted(cs, pod.Namespace, pod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, time.Second*15, true, testutils.PodIsGettingEvicted(cs, pod.Namespace, pod.Name))
if err != nil {
t.Fatalf("Error '%v' while waiting for the pod '%v' to be terminating", err, klog.KObj(pod))
}
@@ -261,7 +261,7 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) {
t.Fatalf("Error: '%v' while deleting pod: '%v'", err, klog.KObj(pod))
}
// wait until the pod is terminating
- err = wait.PollImmediate(time.Second, time.Second*15, testutils.PodIsGettingEvicted(cs, pod.Namespace, pod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, time.Second*15, true, testutils.PodIsGettingEvicted(cs, pod.Namespace, pod.Name))
if err != nil {
t.Fatalf("Error '%v' while waiting for the pod '%v' to be terminating", err, klog.KObj(pod))
}
diff --git a/test/integration/scheduler/extender/extender_test.go b/test/integration/scheduler/extender/extender_test.go
index 1ba4a5614cb..d34b8160acc 100644
--- a/test/integration/scheduler/extender/extender_test.go
+++ b/test/integration/scheduler/extender/extender_test.go
@@ -410,7 +410,8 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
t.Fatalf("Failed to create pod: %v", err)
}
- err = wait.Poll(time.Second, wait.ForeverTestTimeout, testutils.PodScheduled(cs, myPod.Namespace, myPod.Name))
+ err = wait.PollUntilContextTimeout(context.TODO(), time.Second, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, myPod.Namespace, myPod.Name))
if err != nil {
t.Fatalf("Failed to schedule pod: %v", err)
}
diff --git a/test/integration/scheduler/filters/filters_test.go b/test/integration/scheduler/filters/filters_test.go
index 07b08d704e9..6ca67b99b64 100644
--- a/test/integration/scheduler/filters/filters_test.go
+++ b/test/integration/scheduler/filters/filters_test.go
@@ -832,7 +832,8 @@ func TestInterPodAffinity(t *testing.T) {
if err != nil {
t.Fatalf("Error while creating pod: %v", err)
}
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
+ err = wait.PollUntilContextTimeout(ctx, pollInterval, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
if err != nil {
t.Errorf("Error while creating pod: %v", err)
}
@@ -849,9 +850,11 @@ func TestInterPodAffinity(t *testing.T) {
}
if test.fits {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(ctx, pollInterval, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
} else {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(ctx, pollInterval, wait.ForeverTestTimeout, false,
+ podUnschedulable(cs, testPod.Namespace, testPod.Name))
}
if err != nil {
t.Errorf("Error while trying to fit a pod: %v", err)
@@ -1016,7 +1019,8 @@ func TestInterPodAffinityWithNamespaceSelector(t *testing.T) {
if err != nil {
t.Fatalf("Error while creating pod: %v", err)
}
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
if err != nil {
t.Errorf("Error while creating pod: %v", err)
}
@@ -1033,9 +1037,11 @@ func TestInterPodAffinityWithNamespaceSelector(t *testing.T) {
}
if test.fits {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
} else {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ podUnschedulable(cs, testPod.Namespace, testPod.Name))
}
if err != nil {
t.Errorf("Error while trying to fit a pod: %v", err)
@@ -1517,7 +1523,8 @@ func TestPodTopologySpreadFilter(t *testing.T) {
if err != nil {
t.Fatalf("Error while creating pod during test: %v", err)
}
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
if err != nil {
t.Errorf("Error while waiting for pod during test: %v", err)
}
@@ -1528,9 +1535,11 @@ func TestPodTopologySpreadFilter(t *testing.T) {
}
if tt.fits {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.candidateNodes))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.candidateNodes))
} else {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ podUnschedulable(cs, testPod.Namespace, testPod.Name))
}
if err != nil {
t.Errorf("Test Failed: %v", err)
diff --git a/test/integration/scheduler/plugins/plugins_test.go b/test/integration/scheduler/plugins/plugins_test.go
index 7082bb33400..fa6c181ab40 100644
--- a/test/integration/scheduler/plugins/plugins_test.go
+++ b/test/integration/scheduler/plugins/plugins_test.go
@@ -672,7 +672,8 @@ func TestPreFilterPlugin(t *testing.T) {
t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
}
} else if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but got: %v", err)
}
} else {
@@ -844,7 +845,8 @@ func TestPostFilterPlugin(t *testing.T) {
}
if tt.rejectFilter {
- if err = wait.Poll(10*time.Millisecond, 10*time.Second, testutils.PodUnschedulable(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 10*time.Second, false,
+ testutils.PodUnschedulable(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Didn't expect the pod to be scheduled.")
}
@@ -912,7 +914,8 @@ func TestScorePlugin(t *testing.T) {
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but got: %v", err)
}
} else {
@@ -1003,7 +1006,7 @@ func TestReservePluginReserve(t *testing.T) {
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second,
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
}
@@ -1131,7 +1134,8 @@ func TestPrebindPlugin(t *testing.T) {
if err = testutils.WaitForPodToScheduleWithTimeout(testCtx.ClientSet, pod, 10*time.Second); err != nil {
t.Errorf("Expected the pod to be schedulable on retry, but got an error: %v", err)
}
- } else if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ } else if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
}
} else if test.reject {
@@ -1148,7 +1152,7 @@ func TestPrebindPlugin(t *testing.T) {
}
if test.unschedulablePod != nil {
- if err := wait.Poll(10*time.Millisecond, 15*time.Second, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 15*time.Second, false, func(ctx context.Context) (bool, error) {
// 2 means the unschedulable pod is expected to be retried at least twice.
// (one initial attempt plus the one moved by the preBind pod)
return filterPlugin.deepCopy().numFilterCalled >= 2*nodesNum, nil
@@ -1273,7 +1277,8 @@ func TestUnReserveReservePlugins(t *testing.T) {
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a reasons other than Unschedulable, but got: %v", err)
}
@@ -1509,7 +1514,8 @@ func TestUnReserveBindPlugins(t *testing.T) {
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a reasons other than Unschedulable, but got: %v", err)
}
@@ -1681,7 +1687,7 @@ func TestBindPlugin(t *testing.T) {
t.Errorf("Expected %s not to be called, was called %d times.", p2.Name(), p2.numBindCalled)
}
}
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, func() (done bool, err error) {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (done bool, err error) {
p := postBindPlugin.deepCopy()
return p.numPostBindCalled == 1, nil
}); err != nil {
@@ -1692,7 +1698,8 @@ func TestBindPlugin(t *testing.T) {
}
} else {
// bind plugin fails to bind the pod
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
}
p := postBindPlugin.deepCopy()
@@ -1762,7 +1769,8 @@ func TestPostBindPlugin(t *testing.T) {
}
if test.preBindFail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
}
if postBindPlugin.numPostBindCalled > 0 {
@@ -1858,7 +1866,8 @@ func TestPermitPlugin(t *testing.T) {
t.Errorf("Error while creating a test pod: %v", err)
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
}
} else {
@@ -1907,7 +1916,7 @@ func TestMultiplePermitPlugins(t *testing.T) {
var waitingPod framework.WaitingPod
// Wait until the test pod is actually waiting.
- wait.Poll(10*time.Millisecond, 30*time.Second, func() (bool, error) {
+ wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
waitingPod = perPlugin1.fh.GetWaitingPod(pod.UID)
return waitingPod != nil, nil
})
@@ -1959,14 +1968,14 @@ func TestPermitPluginsCancelled(t *testing.T) {
var waitingPod framework.WaitingPod
// Wait until the test pod is actually waiting.
- wait.Poll(10*time.Millisecond, 30*time.Second, func() (bool, error) {
+ wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
waitingPod = perPlugin1.fh.GetWaitingPod(pod.UID)
return waitingPod != nil, nil
})
perPlugin1.rejectAllPods()
// Wait some time for the permit plugins to be cancelled
- err = wait.Poll(10*time.Millisecond, 30*time.Second, func() (bool, error) {
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
p1 := perPlugin1.deepCopy()
p2 := perPlugin2.deepCopy()
return p1.cancelled && p2.cancelled, nil
@@ -2100,7 +2109,8 @@ func TestFilterPlugin(t *testing.T) {
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but got: %v", err)
}
if filterPlugin.numFilterCalled < 1 {
@@ -2156,7 +2166,8 @@ func TestPreScorePlugin(t *testing.T) {
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but got: %v", err)
}
} else {
@@ -2361,7 +2372,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
t.Fatalf("Error while creating the waiting pod: %v", err)
}
// Wait until the waiting-pod is actually waiting.
- if err := wait.Poll(10*time.Millisecond, 30*time.Second, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
w := false
permitPlugin.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { w = true })
return w, nil
@@ -2386,7 +2397,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
}
if w := tt.waitingPod; w != nil {
- if err := wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(testCtx.Ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
w := false
permitPlugin.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { w = true })
return !w, nil
diff --git a/test/integration/scheduler/preemption/preemption_test.go b/test/integration/scheduler/preemption/preemption_test.go
index c57d83247ac..c13cbdd6b95 100644
--- a/test/integration/scheduler/preemption/preemption_test.go
+++ b/test/integration/scheduler/preemption/preemption_test.go
@@ -78,8 +78,8 @@ const filterPluginName = "filter-plugin"
var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)
func waitForNominatedNodeNameWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
- if err := wait.Poll(100*time.Millisecond, timeout, func() (bool, error) {
- pod, err := cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+ if err := wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, false, func(ctx context.Context) (bool, error) {
+ pod, err := cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -504,7 +504,8 @@ func TestPreemption(t *testing.T) {
// Wait for preemption of pods and make sure the other ones are not preempted.
for i, p := range pods {
if _, found := test.preemptedPodIndexes[i]; found {
- if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, wait.ForeverTestTimeout, false,
+ podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("Pod %v/%v is not getting evicted.", p.Namespace, p.Name)
}
pod, err := cs.CoreV1().Pods(p.Namespace).Get(testCtx.Ctx, p.Name, metav1.GetOptions{})
@@ -883,7 +884,7 @@ func TestPreemptionStarvation(t *testing.T) {
}
// Make sure that all pending pods are being marked unschedulable.
for _, p := range pendingPods {
- if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout,
+ if err := wait.PollUntilContextTimeout(testCtx.Ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false,
podUnschedulable(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("Pod %v/%v didn't get marked unschedulable: %v", p.Namespace, p.Name, err)
}
@@ -1214,8 +1215,8 @@ func TestNominatedNodeCleanUp(t *testing.T) {
}
// Verify if .status.nominatedNodeName is cleared.
- if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
- pod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), "medium", metav1.GetOptions{})
+ if err := wait.PollUntilContextTimeout(testCtx.Ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
+ pod, err := cs.CoreV1().Pods(ns).Get(ctx, "medium", metav1.GetOptions{})
if err != nil {
t.Errorf("Error getting the medium pod: %v", err)
}
@@ -1485,7 +1486,8 @@ func TestPDBInPreemption(t *testing.T) {
// Wait for preemption of pods and make sure the other ones are not preempted.
for i, p := range pods {
if _, found := test.preemptedPodIndexes[i]; found {
- if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, wait.ForeverTestTimeout, false,
+ podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.name, p.Namespace, p.Name)
}
} else {
@@ -1622,8 +1624,8 @@ func TestPreferNominatedNode(t *testing.T) {
if err != nil {
t.Errorf("Error while creating high priority pod: %v", err)
}
- err = wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
- preemptor, err = cs.CoreV1().Pods(test.pod.Namespace).Get(context.TODO(), test.pod.Name, metav1.GetOptions{})
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
+ preemptor, err = cs.CoreV1().Pods(test.pod.Namespace).Get(ctx, test.pod.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Error getting the preemptor pod info: %v", err)
}
@@ -1975,7 +1977,8 @@ func TestReadWriteOncePodPreemption(t *testing.T) {
// Wait for preemption of pods and make sure the other ones are not preempted.
for i, p := range pods {
if _, found := test.preemptedPodIndexes[i]; found {
- if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, wait.ForeverTestTimeout, false,
+ podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("Pod %v/%v is not getting evicted.", p.Namespace, p.Name)
}
} else {
diff --git a/test/integration/scheduler/queue_test.go b/test/integration/scheduler/queue_test.go
index 24603f487f5..7b68fcc8681 100644
--- a/test/integration/scheduler/queue_test.go
+++ b/test/integration/scheduler/queue_test.go
@@ -138,7 +138,7 @@ func TestSchedulingGates(t *testing.T) {
}
// Wait for the pods to be present in the scheduling queue.
- if err := wait.Poll(time.Millisecond*200, wait.ForeverTestTimeout, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(ctx, time.Millisecond*200, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
pendingPods, _ := testCtx.Scheduler.SchedulingQueue.PendingPods()
return len(pendingPods) == len(tt.pods), nil
}); err != nil {
@@ -215,7 +215,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
}
// Wait for the three pods to be present in the scheduling queue.
- if err := wait.Poll(time.Millisecond*200, wait.ForeverTestTimeout, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(ctx, time.Millisecond*200, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
pendingPods, _ := testCtx.Scheduler.SchedulingQueue.PendingPods()
return len(pendingPods) == 3, nil
}); err != nil {
@@ -396,7 +396,7 @@ func TestCustomResourceEnqueue(t *testing.T) {
}
// Wait for the testing Pod to be present in the scheduling queue.
- if err := wait.Poll(time.Millisecond*200, wait.ForeverTestTimeout, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(ctx, time.Millisecond*200, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
pendingPods, _ := testCtx.Scheduler.SchedulingQueue.PendingPods()
return len(pendingPods) == 1, nil
}); err != nil {
@@ -489,14 +489,14 @@ func TestRequeueByBindFailure(t *testing.T) {
}
// first binding try should fail.
- err := wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, testutils.PodSchedulingError(cs, ns, "pod-1"))
+ err := wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, testutils.PodSchedulingError(cs, ns, "pod-1"))
if err != nil {
t.Fatalf("Expect pod-1 to be rejected by the bind plugin")
}
// The pod should be enqueued to activeQ/backoffQ without any event.
// The pod should be scheduled in the second binding try.
- err = wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, testutils.PodScheduled(cs, ns, "pod-1"))
+ err = wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, testutils.PodScheduled(cs, ns, "pod-1"))
if err != nil {
t.Fatalf("Expect pod-1 to be scheduled by the bind plugin in the second binding try")
}
@@ -610,20 +610,20 @@ func TestRequeueByPermitRejection(t *testing.T) {
})
// Wait for pod-2 to be scheduled.
- err := wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, func() (done bool, err error) {
+ err := wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (done bool, err error) {
fakePermit.frameworkHandler.IterateOverWaitingPods(func(wp framework.WaitingPod) {
if wp.GetPod().Name == "pod-2" {
wp.Allow(fakePermitPluginName)
}
})
- return testutils.PodScheduled(cs, ns, "pod-2")()
+ return testutils.PodScheduled(cs, ns, "pod-2")(ctx)
})
if err != nil {
t.Fatalf("Expect pod-2 to be scheduled")
}
- err = wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, func() (done bool, err error) {
+ err = wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (done bool, err error) {
pod1Found := false
fakePermit.frameworkHandler.IterateOverWaitingPods(func(wp framework.WaitingPod) {
if wp.GetPod().Name == "pod-1" {
diff --git a/test/integration/scheduler/rescheduling_test.go b/test/integration/scheduler/rescheduling_test.go
index 6bf011b3802..67781fff9c5 100644
--- a/test/integration/scheduler/rescheduling_test.go
+++ b/test/integration/scheduler/rescheduling_test.go
@@ -218,7 +218,8 @@ func TestReScheduling(t *testing.T) {
// The first time for scheduling, pod is error or unschedulable, controlled by wantFirstSchedulingError
if test.wantFirstSchedulingError {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but got: %v", err)
}
} else {
@@ -238,7 +239,8 @@ func TestReScheduling(t *testing.T) {
t.Errorf("Didn't expect the pod to be unschedulable. error: %v", err)
}
} else if test.wantError {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but got: %v", err)
}
} else {
diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go
index 64e714ea71a..bb2d3851aa4 100644
--- a/test/integration/scheduler/scheduler_test.go
+++ b/test/integration/scheduler/scheduler_test.go
@@ -144,7 +144,7 @@ func TestUnschedulableNodes(t *testing.T) {
if err == nil {
t.Errorf("Test %d: Pod scheduled successfully on unschedulable nodes", i)
}
- if err != wait.ErrWaitTimeout {
+ if !wait.Interrupted(err) {
t.Errorf("Test %d: failed while trying to confirm the pod does not get scheduled on the node: %v", i, err)
} else {
t.Logf("Test %d: Pod did not get scheduled on an unschedulable node", i)
@@ -321,7 +321,7 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
}
gotProfiles := make(map[string]string)
- if err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(testCtx.Ctx, 100*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
var ev watch.Event
select {
case ev = <-evs.ResultChan():
diff --git a/test/integration/scheduler/scoring/priorities_test.go b/test/integration/scheduler/scoring/priorities_test.go
index ceebf53e0a7..6101bb0f5fa 100644
--- a/test/integration/scheduler/scoring/priorities_test.go
+++ b/test/integration/scheduler/scoring/priorities_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package scoring
import (
+ "context"
"fmt"
"strings"
"testing"
@@ -628,7 +629,8 @@ func TestPodTopologySpreadScoring(t *testing.T) {
if err != nil {
t.Fatalf("Test Failed: error while creating pod during test: %v", err)
}
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
if err != nil {
t.Errorf("Test Failed: error while waiting for pod during test: %v", err)
}
@@ -640,9 +642,11 @@ func TestPodTopologySpreadScoring(t *testing.T) {
}
if tt.fits {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.want))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.want))
} else {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ podUnschedulable(cs, testPod.Namespace, testPod.Name))
}
if err != nil {
t.Errorf("Test Failed: %v", err)
@@ -706,8 +710,8 @@ func TestDefaultPodTopologySpreadScoring(t *testing.T) {
}
var pods []v1.Pod
// Wait for all Pods scheduled.
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, func() (bool, error) {
- podList, err := cs.CoreV1().Pods(ns).List(testCtx.Ctx, metav1.ListOptions{})
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
+ podList, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil {
t.Fatalf("Cannot list pods to verify scheduling: %v", err)
}
diff --git a/test/integration/util/util.go b/test/integration/util/util.go
index 8d70e0006be..63ffa12f6ed 100644
--- a/test/integration/util/util.go
+++ b/test/integration/util/util.go
@@ -579,7 +579,7 @@ func InitTestSchedulerWithOptions(
// WaitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns
// an error if it does not get scheduled within the given timeout.
func WaitForPodToScheduleWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
- return wait.Poll(100*time.Millisecond, timeout, PodScheduled(cs, pod.Namespace, pod.Name))
+ return wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, false, PodScheduled(cs, pod.Namespace, pod.Name))
}
// WaitForPodToSchedule waits for a pod to get scheduled and returns an error if
@@ -589,9 +589,9 @@ func WaitForPodToSchedule(cs clientset.Interface, pod *v1.Pod) error {
}
// PodScheduled checks if the pod has been scheduled
-func PodScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
- return func() (bool, error) {
- pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc {
+ return func(ctx context.Context) (bool, error) {
+ pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
@@ -899,9 +899,9 @@ func RunPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error)
}
// PodIsGettingEvicted returns true if the pod's deletion timestamp is set.
-func PodIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
- return func() (bool, error) {
- pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc {
+ return func(ctx context.Context) (bool, error) {
+ pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -913,9 +913,9 @@ func PodIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wa
}
// PodScheduledIn returns true if a given pod is placed onto one of the expected nodes.
-func PodScheduledIn(c clientset.Interface, podNamespace, podName string, nodeNames []string) wait.ConditionFunc {
- return func() (bool, error) {
- pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodScheduledIn(c clientset.Interface, podNamespace, podName string, nodeNames []string) wait.ConditionWithContextFunc {
+ return func(ctx context.Context) (bool, error) {
+ pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
@@ -934,9 +934,9 @@ func PodScheduledIn(c clientset.Interface, podNamespace, podName string, nodeNam
// PodUnschedulable returns a condition function that returns true if the given pod
// gets unschedulable status of reason 'Unschedulable'.
-func PodUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
- return func() (bool, error) {
- pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc {
+ return func(ctx context.Context) (bool, error) {
+ pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
@@ -950,9 +950,9 @@ func PodUnschedulable(c clientset.Interface, podNamespace, podName string) wait.
// PodSchedulingError returns a condition function that returns true if the given pod
// gets unschedulable status for reasons other than "Unschedulable". The scheduler
// records such reasons in case of error.
-func PodSchedulingError(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
- return func() (bool, error) {
- pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodSchedulingError(c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc {
+ return func(ctx context.Context) (bool, error) {
+ pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
@@ -981,7 +981,7 @@ func PodSchedulingGated(c clientset.Interface, podNamespace, podName string) wai
// WaitForPodUnschedulableWithTimeout waits for a pod to fail scheduling and returns
// an error if it does not become unschedulable within the given timeout.
func WaitForPodUnschedulableWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
- return wait.Poll(100*time.Millisecond, timeout, PodUnschedulable(cs, pod.Namespace, pod.Name))
+ return wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, false, PodUnschedulable(cs, pod.Namespace, pod.Name))
}
// WaitForPodUnschedulable waits for a pod to fail scheduling and returns
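The last group of hunks converts the testutils condition constructors from `wait.ConditionFunc` to `wait.ConditionWithContextFunc`, so the polling context reaches the API calls, and timeouts are checked with `wait.Interrupted` instead of comparing against the deprecated `wait.ErrWaitTimeout`. A minimal sketch of the converted shape (`podHasNodeName` and `waitForNodeName` are hypothetical illustrations in the style of `testutils.PodScheduled`):

```go
package example

import (
	"context"
	"fmt"
	"time"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// podHasNodeName returns a context-aware condition: the poll loop hands its
// context to the closure, which threads it into the API call.
func podHasNodeName(c clientset.Interface, ns, name string) wait.ConditionWithContextFunc {
	return func(ctx context.Context) (bool, error) {
		pod, err := c.CoreV1().Pods(ns).Get(ctx, name, metav1.GetOptions{})
		if err != nil {
			return false, nil // could be a transient connection error, so retry
		}
		return pod.Spec.NodeName != "", nil
	}
}

func waitForNodeName(ctx context.Context, c clientset.Interface, ns, name string) error {
	err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 30*time.Second, false,
		podHasNodeName(c, ns, name))
	// wait.Interrupted matches both context cancellation and the poll timeout,
	// replacing direct comparison against wait.ErrWaitTimeout.
	if wait.Interrupted(err) {
		return fmt.Errorf("pod %s/%s was never bound to a node: %w", ns, name, err)
	}
	return err
}
```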
qid: 25
cleanup: Update deprecated FromInt to FromInt32 (#119858)
* redo commit
* apply suggestions from liggitt
* update Parse function based on suggestions
date: 2023-08-18
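The replacement is mechanical; the only judgment call is where the int-to-int32 narrowing happens. A minimal sketch of the two shapes seen throughout the diff below (`targetPort` is a hypothetical helper):

```go
package example

import "k8s.io/apimachinery/pkg/util/intstr"

// When the caller already holds an int32 (e.g. endpoint.BindPort), the cast
// disappears entirely:
//   intstr.FromInt32(endpoint.BindPort)
//
// When the caller still holds an int, the narrowing becomes explicit at the
// call site instead of being hidden inside the deprecated intstr.FromInt:
func targetPort(port int) intstr.IntOrString {
	return intstr.FromInt32(int32(port))
}
```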
diff --git a/cmd/kubeadm/app/phases/controlplane/manifests.go b/cmd/kubeadm/app/phases/controlplane/manifests.go
index 1c57110171f..42725d8dab8 100644
--- a/cmd/kubeadm/app/phases/controlplane/manifests.go
+++ b/cmd/kubeadm/app/phases/controlplane/manifests.go
@@ -63,9 +63,9 @@ func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmap
ImagePullPolicy: v1.PullIfNotPresent,
Command: getAPIServerCommand(cfg, endpoint),
VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeAPIServer)),
- LivenessProbe: staticpodutil.LivenessProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/livez", int(endpoint.BindPort), v1.URISchemeHTTPS),
- ReadinessProbe: staticpodutil.ReadinessProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/readyz", int(endpoint.BindPort), v1.URISchemeHTTPS),
- StartupProbe: staticpodutil.StartupProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/livez", int(endpoint.BindPort), v1.URISchemeHTTPS, cfg.APIServer.TimeoutForControlPlane),
+ LivenessProbe: staticpodutil.LivenessProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/livez", endpoint.BindPort, v1.URISchemeHTTPS),
+ ReadinessProbe: staticpodutil.ReadinessProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/readyz", endpoint.BindPort, v1.URISchemeHTTPS),
+ StartupProbe: staticpodutil.StartupProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/livez", endpoint.BindPort, v1.URISchemeHTTPS, cfg.APIServer.TimeoutForControlPlane),
Resources: staticpodutil.ComponentResources("250m"),
Env: kubeadmutil.MergeEnv(proxyEnvs, cfg.APIServer.ExtraEnvs),
}, mounts.GetVolumes(kubeadmconstants.KubeAPIServer),
diff --git a/cmd/kubeadm/app/util/staticpod/utils.go b/cmd/kubeadm/app/util/staticpod/utils.go
index 4f74e7e84ed..cab4feb16bf 100644
--- a/cmd/kubeadm/app/util/staticpod/utils.go
+++ b/cmd/kubeadm/app/util/staticpod/utils.go
@@ -242,20 +242,20 @@ func ReadStaticPodFromDisk(manifestPath string) (*v1.Pod, error) {
}
// LivenessProbe creates a Probe object with a HTTPGet handler
-func LivenessProbe(host, path string, port int, scheme v1.URIScheme) *v1.Probe {
+func LivenessProbe(host, path string, port int32, scheme v1.URIScheme) *v1.Probe {
// sets initialDelaySeconds same as periodSeconds to skip one period before running a check
return createHTTPProbe(host, path, port, scheme, 10, 15, 8, 10)
}
// ReadinessProbe creates a Probe object with a HTTPGet handler
-func ReadinessProbe(host, path string, port int, scheme v1.URIScheme) *v1.Probe {
+func ReadinessProbe(host, path string, port int32, scheme v1.URIScheme) *v1.Probe {
// sets initialDelaySeconds as '0' because we don't want to delay user infrastructure checks
// looking for "ready" status on kubeadm static Pods
return createHTTPProbe(host, path, port, scheme, 0, 15, 3, 1)
}
// StartupProbe creates a Probe object with a HTTPGet handler
-func StartupProbe(host, path string, port int, scheme v1.URIScheme, timeoutForControlPlane *metav1.Duration) *v1.Probe {
+func StartupProbe(host, path string, port int32, scheme v1.URIScheme, timeoutForControlPlane *metav1.Duration) *v1.Probe {
periodSeconds, timeoutForControlPlaneSeconds := int32(10), kubeadmconstants.DefaultControlPlaneTimeout.Seconds()
if timeoutForControlPlane != nil {
timeoutForControlPlaneSeconds = timeoutForControlPlane.Seconds()
@@ -267,13 +267,13 @@ func StartupProbe(host, path string, port int, scheme v1.URIScheme, timeoutForCo
return createHTTPProbe(host, path, port, scheme, periodSeconds, 15, failureThreshold, periodSeconds)
}
-func createHTTPProbe(host, path string, port int, scheme v1.URIScheme, initialDelaySeconds, timeoutSeconds, failureThreshold, periodSeconds int32) *v1.Probe {
+func createHTTPProbe(host, path string, port int32, scheme v1.URIScheme, initialDelaySeconds, timeoutSeconds, failureThreshold, periodSeconds int32) *v1.Probe {
return &v1.Probe{
ProbeHandler: v1.ProbeHandler{
HTTPGet: &v1.HTTPGetAction{
Host: host,
Path: path,
- Port: intstr.FromInt(port),
+ Port: intstr.FromInt32(port),
Scheme: scheme,
},
},
@@ -312,7 +312,7 @@ func GetSchedulerProbeAddress(cfg *kubeadmapi.ClusterConfiguration) string {
// GetEtcdProbeEndpoint takes a kubeadm Etcd configuration object and attempts to parse
// the first URL in the listen-metrics-urls argument, returning an etcd probe hostname,
// port and scheme
-func GetEtcdProbeEndpoint(cfg *kubeadmapi.Etcd, isIPv6 bool) (string, int, v1.URIScheme) {
+func GetEtcdProbeEndpoint(cfg *kubeadmapi.Etcd, isIPv6 bool) (string, int32, v1.URIScheme) {
localhost := "127.0.0.1"
if isIPv6 {
localhost = "::1"
@@ -346,7 +346,7 @@ func GetEtcdProbeEndpoint(cfg *kubeadmapi.Etcd, isIPv6 bool) (string, int, v1.UR
port = p
}
}
- return hostname, port, scheme
+ return hostname, int32(port), scheme
}
return localhost, kubeadmconstants.EtcdMetricsPort, v1.URISchemeHTTP
}
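The helper signatures above change from `int` to `int32` because the port ultimately flows into `intstr.FromInt32` and the v1 API, where container and probe ports are int32. A minimal sketch of the resulting shape (`buildProbe` is a hypothetical stand-in for kubeadm's `createHTTPProbe`, reduced to the port plumbing):

```go
package example

import (
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
)

// buildProbe keeps the port as int32 end to end, so converting it to an
// IntOrString needs no narrowing cast at this point.
func buildProbe(host, path string, port int32, scheme v1.URIScheme) *v1.Probe {
	return &v1.Probe{
		ProbeHandler: v1.ProbeHandler{
			HTTPGet: &v1.HTTPGetAction{
				Host:   host,
				Path:   path,
				Port:   intstr.FromInt32(port),
				Scheme: scheme,
			},
		},
	}
}
```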
diff --git a/cmd/kubeadm/app/util/staticpod/utils_test.go b/cmd/kubeadm/app/util/staticpod/utils_test.go
index 8f7b2461cdc..bcea14876d1 100644
--- a/cmd/kubeadm/app/util/staticpod/utils_test.go
+++ b/cmd/kubeadm/app/util/staticpod/utils_test.go
@@ -244,7 +244,7 @@ func TestGetEtcdProbeEndpoint(t *testing.T) {
cfg *kubeadmapi.Etcd
isIPv6 bool
expectedHostname string
- expectedPort int
+ expectedPort int32
expectedScheme v1.URIScheme
}{
{
diff --git a/pkg/apis/apps/v1/defaults_test.go b/pkg/apis/apps/v1/defaults_test.go
index 9028a3bef54..d5683252b2c 100644
--- a/pkg/apis/apps/v1/defaults_test.go
+++ b/pkg/apis/apps/v1/defaults_test.go
@@ -176,7 +176,7 @@ func TestSetDefaultDaemonSetSpec(t *testing.T) {
}
func getMaxUnavailable(maxUnavailable int) *intstr.IntOrString {
- maxUnavailableIntOrStr := intstr.FromInt(maxUnavailable)
+ maxUnavailableIntOrStr := intstr.FromInt32(int32(maxUnavailable))
return &maxUnavailableIntOrStr
}
diff --git a/pkg/apis/apps/v1beta1/defaults_test.go b/pkg/apis/apps/v1beta1/defaults_test.go
index 6c6e24369ac..086b652174c 100644
--- a/pkg/apis/apps/v1beta1/defaults_test.go
+++ b/pkg/apis/apps/v1beta1/defaults_test.go
@@ -532,6 +532,6 @@ func getPartition(partition int32) *int32 {
}
func getMaxUnavailable(maxUnavailable int) *intstr.IntOrString {
- maxUnavailableIntOrStr := intstr.FromInt(maxUnavailable)
+ maxUnavailableIntOrStr := intstr.FromInt32(int32(maxUnavailable))
return &maxUnavailableIntOrStr
}
diff --git a/pkg/apis/apps/v1beta2/defaults_test.go b/pkg/apis/apps/v1beta2/defaults_test.go
index 11a24389c07..acf05c15466 100644
--- a/pkg/apis/apps/v1beta2/defaults_test.go
+++ b/pkg/apis/apps/v1beta2/defaults_test.go
@@ -176,7 +176,7 @@ func TestSetDefaultDaemonSetSpec(t *testing.T) {
}
func getMaxUnavailable(maxUnavailable int) *intstr.IntOrString {
- maxUnavailableIntOrStr := intstr.FromInt(maxUnavailable)
+ maxUnavailableIntOrStr := intstr.FromInt32(int32(maxUnavailable))
return &maxUnavailableIntOrStr
}
diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go
index b19ac05f14b..b40e8ad54df 100644
--- a/pkg/apis/core/validation/validation_test.go
+++ b/pkg/apis/core/validation/validation_test.go
@@ -8197,7 +8197,7 @@ func TestValidateInitContainers(t *testing.T) {
StartupProbe: &core.Probe{
ProbeHandler: core.ProbeHandler{
TCPSocket: &core.TCPSocketAction{
- Port: intstr.FromInt(80),
+ Port: intstr.FromInt32(80),
},
},
SuccessThreshold: 1,
@@ -8413,7 +8413,7 @@ func TestValidateInitContainers(t *testing.T) {
RestartPolicy: &containerRestartPolicyAlways,
StartupProbe: &core.Probe{
ProbeHandler: core.ProbeHandler{
- TCPSocket: &core.TCPSocketAction{Port: intstr.FromInt(80)},
+ TCPSocket: &core.TCPSocketAction{Port: intstr.FromInt32(80)},
},
SuccessThreshold: 2,
},
diff --git a/pkg/controller/daemon/update_test.go b/pkg/controller/daemon/update_test.go
index 74c74fe9b14..86f9ae7fb5c 100644
--- a/pkg/controller/daemon/update_test.go
+++ b/pkg/controller/daemon/update_test.go
@@ -48,7 +48,7 @@ func TestDaemonSetUpdatesPods(t *testing.T) {
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
- intStr := intstr.FromInt(maxUnavailable)
+ intStr := intstr.FromInt32(int32(maxUnavailable))
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
manager.dsStore.Update(ds)
@@ -90,7 +90,7 @@ func TestDaemonSetUpdatesPodsWithMaxSurge(t *testing.T) {
// surge is the controlling amount
maxSurge := 2
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
- ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(maxSurge))
+ ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(int32(maxSurge)))
manager.dsStore.Update(ds)
clearExpectations(t, manager, ds, podControl)
@@ -135,7 +135,7 @@ func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) {
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
- intStr := intstr.FromInt(maxUnavailable)
+ intStr := intstr.FromInt32(int32(maxUnavailable))
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
err = manager.dsStore.Update(ds)
if err != nil {
@@ -171,7 +171,7 @@ func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
- intStr := intstr.FromInt(maxUnavailable)
+ intStr := intstr.FromInt32(int32(maxUnavailable))
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
err = manager.dsStore.Update(ds)
if err != nil {
@@ -203,7 +203,7 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
maxSurge := 3
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
- ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(maxSurge))
+ ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(int32(maxSurge)))
manager.dsStore.Update(ds)
// all old pods are unavailable so should be surged
@@ -347,7 +347,7 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
- intStr := intstr.FromInt(maxUnavailable)
+ intStr := intstr.FromInt32(int32(maxUnavailable))
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
manager.dsStore.Update(ds)
diff --git a/pkg/controller/deployment/sync_test.go b/pkg/controller/deployment/sync_test.go
index f5f2f899d3f..b7605257bd9 100644
--- a/pkg/controller/deployment/sync_test.go
+++ b/pkg/controller/deployment/sync_test.go
@@ -35,7 +35,7 @@ import (
)
func intOrStrP(val int) *intstr.IntOrString {
- intOrStr := intstr.FromInt(val)
+ intOrStr := intstr.FromInt32(int32(val))
return &intOrStr
}
diff --git a/pkg/controller/deployment/util/deployment_util_test.go b/pkg/controller/deployment/util/deployment_util_test.go
index 42c7bbf4377..c0f2f33a96e 100644
--- a/pkg/controller/deployment/util/deployment_util_test.go
+++ b/pkg/controller/deployment/util/deployment_util_test.go
@@ -516,11 +516,11 @@ func TestNewRSNewReplicas(t *testing.T) {
newDeployment.Spec.Strategy = apps.DeploymentStrategy{Type: test.strategyType}
newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
MaxUnavailable: func(i int) *intstr.IntOrString {
- x := intstr.FromInt(i)
+ x := intstr.FromInt32(int32(i))
return &x
}(1),
MaxSurge: func(i int) *intstr.IntOrString {
- x := intstr.FromInt(i)
+ x := intstr.FromInt32(int32(i))
return &x
}(test.maxSurge),
}
@@ -705,8 +705,8 @@ func TestDeploymentComplete(t *testing.T) {
Replicas: &desired,
Strategy: apps.DeploymentStrategy{
RollingUpdate: &apps.RollingUpdateDeployment{
- MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxUnavailable)),
- MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxSurge)),
+ MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt32(int32(i)); return &x }(int(maxUnavailable)),
+ MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt32(int32(i)); return &x }(int(maxSurge)),
},
Type: apps.RollingUpdateDeploymentStrategyType,
},
@@ -960,7 +960,7 @@ func TestMaxUnavailable(t *testing.T) {
Replicas: func(i int32) *int32 { return &i }(replicas),
Strategy: apps.DeploymentStrategy{
RollingUpdate: &apps.RollingUpdateDeployment{
- MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(1)),
+ MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt32(int32(i)); return &x }(int(1)),
MaxUnavailable: &maxUnavailable,
},
Type: apps.RollingUpdateDeploymentStrategyType,
@@ -1255,7 +1255,7 @@ func TestGetDeploymentsForReplicaSet(t *testing.T) {
}
func TestMinAvailable(t *testing.T) {
- maxSurge := func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(1))
+ maxSurge := func(i int) *intstr.IntOrString { x := intstr.FromInt32(int32(i)); return &x }(int(1))
deployment := func(replicas int32, maxUnavailable intstr.IntOrString) *apps.Deployment {
return &apps.Deployment{
Spec: apps.DeploymentSpec{
diff --git a/pkg/controlplane/controller/kubernetesservice/controller.go b/pkg/controlplane/controller/kubernetesservice/controller.go
index bfee9fa8555..4966eb9738b 100644
--- a/pkg/controlplane/controller/kubernetesservice/controller.go
+++ b/pkg/controlplane/controller/kubernetesservice/controller.go
@@ -184,7 +184,7 @@ func createPortAndServiceSpec(servicePort int, targetServicePort int, nodePort i
Protocol: corev1.ProtocolTCP,
Port: int32(servicePort),
Name: servicePortName,
- TargetPort: intstr.FromInt(targetServicePort),
+ TargetPort: intstr.FromInt32(int32(targetServicePort)),
}}
serviceType := corev1.ServiceTypeClusterIP
if nodePort > 0 {
diff --git a/pkg/kubelet/lifecycle/handlers_test.go b/pkg/kubelet/lifecycle/handlers_test.go
index 0e6bc0459be..a6d095add38 100644
--- a/pkg/kubelet/lifecycle/handlers_test.go
+++ b/pkg/kubelet/lifecycle/handlers_test.go
@@ -45,7 +45,7 @@ import (
func TestResolvePortInt(t *testing.T) {
expected := 80
- port, err := resolvePort(intstr.FromInt(expected), &v1.Container{})
+ port, err := resolvePort(intstr.FromInt32(int32(expected)), &v1.Container{})
if port != expected {
t.Errorf("expected: %d, saw: %d", expected, port)
}
diff --git a/pkg/kubelet/prober/scale_test.go b/pkg/kubelet/prober/scale_test.go
index 6de9687e183..0b8b003d6f3 100644
--- a/pkg/kubelet/prober/scale_test.go
+++ b/pkg/kubelet/prober/scale_test.go
@@ -257,14 +257,14 @@ func (f *fakePod) probeHandler() v1.ProbeHandler {
handler = v1.ProbeHandler{
HTTPGet: &v1.HTTPGetAction{
Host: "127.0.0.1",
- Port: intstr.FromInt(port),
+ Port: intstr.FromInt32(int32(port)),
},
}
} else {
handler = v1.ProbeHandler{
TCPSocket: &v1.TCPSocketAction{
Host: "127.0.0.1",
- Port: intstr.FromInt(port),
+ Port: intstr.FromInt32(int32(port)),
},
}
}
diff --git a/pkg/proxy/iptables/number_generated_rules_test.go b/pkg/proxy/iptables/number_generated_rules_test.go
index b4cc25ce597..4d75bdee878 100644
--- a/pkg/proxy/iptables/number_generated_rules_test.go
+++ b/pkg/proxy/iptables/number_generated_rules_test.go
@@ -414,7 +414,7 @@ func generateServiceEndpoints(nServices, nEndpoints int, epsFunc func(eps *disco
Name: fmt.Sprintf("%d", epPort),
Protocol: v1.ProtocolTCP,
Port: int32(basePort + i),
- TargetPort: intstr.FromInt(epPort),
+ TargetPort: intstr.FromInt32(int32(epPort)),
},
}
diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go
index 831fb1b6a30..7da8ac5af0f 100644
--- a/pkg/proxy/iptables/proxier_test.go
+++ b/pkg/proxy/iptables/proxier_test.go
@@ -2623,7 +2623,7 @@ func TestExternalIPsReject(t *testing.T) {
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(svcPort),
+ TargetPort: intstr.FromInt32(int32(svcPort)),
}}
}),
)
@@ -2698,7 +2698,7 @@ func TestOnlyLocalExternalIPs(t *testing.T) {
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(svcPort),
+ TargetPort: intstr.FromInt32(int32(svcPort)),
}}
}),
)
@@ -2810,7 +2810,7 @@ func TestNonLocalExternalIPs(t *testing.T) {
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(svcPort),
+ TargetPort: intstr.FromInt32(int32(svcPort)),
}}
}),
)
@@ -3932,7 +3932,7 @@ func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port
Protocol: protocol,
Port: port,
NodePort: nodeport,
- TargetPort: intstr.FromInt(targetPort),
+ TargetPort: intstr.FromInt32(int32(targetPort)),
}
return append(array, svcPort)
}
diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go
index 22a1b84e638..5fa41441885 100644
--- a/pkg/proxy/ipvs/proxier_test.go
+++ b/pkg/proxy/ipvs/proxier_test.go
@@ -1743,7 +1743,7 @@ func TestExternalIPsNoEndpoint(t *testing.T) {
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(svcPort),
+ TargetPort: intstr.FromInt32(int32(svcPort)),
}}
}),
)
@@ -1795,7 +1795,7 @@ func TestExternalIPs(t *testing.T) {
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(svcPort),
+ TargetPort: intstr.FromInt32(int32(svcPort)),
}}
}),
)
@@ -1866,7 +1866,7 @@ func TestOnlyLocalExternalIPs(t *testing.T) {
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(svcPort),
+ TargetPort: intstr.FromInt32(int32(svcPort)),
}}
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
}),
@@ -2476,7 +2476,7 @@ func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port
Protocol: protocol,
Port: port,
NodePort: nodeport,
- TargetPort: intstr.FromInt(targetPort),
+ TargetPort: intstr.FromInt32(int32(targetPort)),
}
return append(array, svcPort)
}
diff --git a/pkg/proxy/service_test.go b/pkg/proxy/service_test.go
index a54d2d0876b..32c5541d501 100644
--- a/pkg/proxy/service_test.go
+++ b/pkg/proxy/service_test.go
@@ -67,7 +67,7 @@ func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port
Protocol: protocol,
Port: port,
NodePort: nodeport,
- TargetPort: intstr.FromInt(targetPort),
+ TargetPort: intstr.FromInt32(int32(targetPort)),
}
return append(array, svcPort)
}
diff --git a/pkg/registry/apps/statefulset/strategy_test.go b/pkg/registry/apps/statefulset/strategy_test.go
index e46cb62fb6e..fabd90fa896 100644
--- a/pkg/registry/apps/statefulset/strategy_test.go
+++ b/pkg/registry/apps/statefulset/strategy_test.go
@@ -308,7 +308,7 @@ func generateStatefulSetWithMinReadySeconds(minReadySeconds int32) *apps.Statefu
func makeStatefulSetWithMaxUnavailable(maxUnavailable *int) *apps.StatefulSet {
rollingUpdate := apps.RollingUpdateStatefulSetStrategy{}
if maxUnavailable != nil {
- maxUnavailableIntStr := intstr.FromInt(*maxUnavailable)
+ maxUnavailableIntStr := intstr.FromInt32(int32(*maxUnavailable))
rollingUpdate = apps.RollingUpdateStatefulSetStrategy{
MaxUnavailable: &maxUnavailableIntStr,
}
diff --git a/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go
index 0ea88156bef..f358c794d10 100644
--- a/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go
+++ b/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -72,14 +72,14 @@ func FromString(val string) IntOrString {
return IntOrString{Type: String, StrVal: val}
}
-// Parse the given string and try to convert it to an integer before
+// Parse the given string and try to convert it to an int32 integer before
// setting it as a string value.
func Parse(val string) IntOrString {
- i, err := strconv.Atoi(val)
+ i, err := strconv.ParseInt(val, 10, 32)
if err != nil {
return FromString(val)
}
- return FromInt(i)
+ return FromInt32(int32(i))
}
// UnmarshalJSON implements the json.Unmarshaller interface.
diff --git a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go
index b37b023ab99..62909f88230 100644
--- a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go
+++ b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go
@@ -100,7 +100,7 @@ func tweakAddLBIngress(ip string) serviceTweak {
func makeServicePort(protocol v1.Protocol, targetPort int) []v1.ServicePort {
sp := v1.ServicePort{Port: 80, Protocol: protocol}
if targetPort > 0 {
- sp.TargetPort = intstr.FromInt(targetPort)
+ sp.TargetPort = intstr.FromInt32(int32(targetPort))
}
return []v1.ServicePort{sp}
}
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose_test.go
index f8b20067651..e624d5c1466 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose_test.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose_test.go
@@ -1709,13 +1709,13 @@ func TestGenerateService(t *testing.T) {
Name: "port-1-tcp",
Port: 53,
Protocol: corev1.ProtocolTCP,
- TargetPort: intstr.FromInt(53),
+ TargetPort: intstr.FromInt32(53),
},
{
Name: "port-1-udp",
Port: 53,
Protocol: corev1.ProtocolUDP,
- TargetPort: intstr.FromInt(53),
+ TargetPort: intstr.FromInt32(53),
},
},
ClusterIP: corev1.ClusterIPNone,
diff --git a/staging/src/k8s.io/kubectl/pkg/generate/versioned/service.go b/staging/src/k8s.io/kubectl/pkg/generate/versioned/service.go
index 0c1beeaab8b..68b6557a641 100644
--- a/staging/src/k8s.io/kubectl/pkg/generate/versioned/service.go
+++ b/staging/src/k8s.io/kubectl/pkg/generate/versioned/service.go
@@ -196,7 +196,7 @@ func generateService(genericParams map[string]interface{}) (runtime.Object, erro
if portNum, err := strconv.Atoi(targetPortString); err != nil {
targetPort = intstr.FromString(targetPortString)
} else {
- targetPort = intstr.FromInt(portNum)
+ targetPort = intstr.FromInt32(int32(portNum))
}
// Use the same target-port for every port
for i := range service.Spec.Ports {
diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external_test.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external_test.go
index 7ab6513e869..8b5c6a48350 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external_test.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external_test.go
@@ -934,13 +934,13 @@ func TestFirewallNeedsUpdate(t *testing.T) {
require.NoError(t, err)
svc := fakeLoadbalancerService("")
svc.Spec.Ports = []v1.ServicePort{
- {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt(80)},
- {Name: "port2", Protocol: v1.ProtocolTCP, Port: int32(81), TargetPort: intstr.FromInt(81)},
- {Name: "port3", Protocol: v1.ProtocolTCP, Port: int32(82), TargetPort: intstr.FromInt(82)},
- {Name: "port4", Protocol: v1.ProtocolTCP, Port: int32(84), TargetPort: intstr.FromInt(84)},
- {Name: "port5", Protocol: v1.ProtocolTCP, Port: int32(85), TargetPort: intstr.FromInt(85)},
- {Name: "port6", Protocol: v1.ProtocolTCP, Port: int32(86), TargetPort: intstr.FromInt(86)},
- {Name: "port7", Protocol: v1.ProtocolTCP, Port: int32(88), TargetPort: intstr.FromInt(87)},
+ {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt32(80)},
+ {Name: "port2", Protocol: v1.ProtocolTCP, Port: int32(81), TargetPort: intstr.FromInt32(81)},
+ {Name: "port3", Protocol: v1.ProtocolTCP, Port: int32(82), TargetPort: intstr.FromInt32(82)},
+ {Name: "port4", Protocol: v1.ProtocolTCP, Port: int32(84), TargetPort: intstr.FromInt32(84)},
+ {Name: "port5", Protocol: v1.ProtocolTCP, Port: int32(85), TargetPort: intstr.FromInt32(85)},
+ {Name: "port6", Protocol: v1.ProtocolTCP, Port: int32(86), TargetPort: intstr.FromInt32(86)},
+ {Name: "port7", Protocol: v1.ProtocolTCP, Port: int32(88), TargetPort: intstr.FromInt32(87)},
}
status, err := createExternalLoadBalancer(gce, svc, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
@@ -1643,7 +1643,7 @@ func TestFirewallObject(t *testing.T) {
desc: "empty source ranges",
sourceRanges: utilnet.IPNetSet{},
svcPorts: []v1.ServicePort{
- {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt(80)},
+ {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt32(80)},
},
expectedFirewall: func(fw compute.Firewall) compute.Firewall {
return fw
@@ -1653,7 +1653,7 @@ func TestFirewallObject(t *testing.T) {
desc: "has source ranges",
sourceRanges: sourceRanges,
svcPorts: []v1.ServicePort{
- {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt(80)},
+ {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt32(80)},
},
expectedFirewall: func(fw compute.Firewall) compute.Firewall {
fw.SourceRanges = srcRanges
@@ -1665,7 +1665,7 @@ func TestFirewallObject(t *testing.T) {
sourceRanges: utilnet.IPNetSet{},
destinationIP: dstIP,
svcPorts: []v1.ServicePort{
- {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt(80)},
+ {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt32(80)},
},
expectedFirewall: func(fw compute.Firewall) compute.Firewall {
fw.DestinationRanges = []string{dstIP}
@@ -1676,9 +1676,9 @@ func TestFirewallObject(t *testing.T) {
desc: "has multiple ports",
sourceRanges: sourceRanges,
svcPorts: []v1.ServicePort{
- {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt(80)},
- {Name: "port2", Protocol: v1.ProtocolTCP, Port: int32(82), TargetPort: intstr.FromInt(82)},
- {Name: "port3", Protocol: v1.ProtocolTCP, Port: int32(84), TargetPort: intstr.FromInt(84)},
+ {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt32(80)},
+ {Name: "port2", Protocol: v1.ProtocolTCP, Port: int32(82), TargetPort: intstr.FromInt32(82)},
+ {Name: "port3", Protocol: v1.ProtocolTCP, Port: int32(84), TargetPort: intstr.FromInt32(84)},
},
expectedFirewall: func(fw compute.Firewall) compute.Firewall {
fw.Allowed = []*compute.FirewallAllowed{
@@ -1695,13 +1695,13 @@ func TestFirewallObject(t *testing.T) {
desc: "has multiple ports",
sourceRanges: sourceRanges,
svcPorts: []v1.ServicePort{
- {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt(80)},
- {Name: "port2", Protocol: v1.ProtocolTCP, Port: int32(81), TargetPort: intstr.FromInt(81)},
- {Name: "port3", Protocol: v1.ProtocolTCP, Port: int32(82), TargetPort: intstr.FromInt(82)},
- {Name: "port4", Protocol: v1.ProtocolTCP, Port: int32(84), TargetPort: intstr.FromInt(84)},
- {Name: "port5", Protocol: v1.ProtocolTCP, Port: int32(85), TargetPort: intstr.FromInt(85)},
- {Name: "port6", Protocol: v1.ProtocolTCP, Port: int32(86), TargetPort: intstr.FromInt(86)},
- {Name: "port7", Protocol: v1.ProtocolTCP, Port: int32(88), TargetPort: intstr.FromInt(87)},
+ {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt32(80)},
+ {Name: "port2", Protocol: v1.ProtocolTCP, Port: int32(81), TargetPort: intstr.FromInt32(81)},
+ {Name: "port3", Protocol: v1.ProtocolTCP, Port: int32(82), TargetPort: intstr.FromInt32(82)},
+ {Name: "port4", Protocol: v1.ProtocolTCP, Port: int32(84), TargetPort: intstr.FromInt32(84)},
+ {Name: "port5", Protocol: v1.ProtocolTCP, Port: int32(85), TargetPort: intstr.FromInt32(85)},
+ {Name: "port6", Protocol: v1.ProtocolTCP, Port: int32(86), TargetPort: intstr.FromInt32(86)},
+ {Name: "port7", Protocol: v1.ProtocolTCP, Port: int32(88), TargetPort: intstr.FromInt32(87)},
},
expectedFirewall: func(fw compute.Firewall) compute.Firewall {
fw.Allowed = []*compute.FirewallAllowed{
diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go
index 9e9603271e4..1dc86de1673 100644
--- a/test/e2e/apps/deployment.go
+++ b/test/e2e/apps/deployment.go
@@ -671,7 +671,7 @@ func failureTrap(ctx context.Context, c clientset.Interface, ns string) {
}
func intOrStrP(num int) *intstr.IntOrString {
- intstr := intstr.FromInt(num)
+ intstr := intstr.FromInt32(int32(num))
return &intstr
}
diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go
index b1fe0db5cd2..9dc971956fe 100644
--- a/test/e2e/autoscaling/cluster_size_autoscaling.go
+++ b/test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -1035,7 +1035,7 @@ func runDrainTest(ctx context.Context, f *framework.Framework, migSizes map[stri
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, namespace, "reschedulable-pods")
ginkgo.By("Create a PodDisruptionBudget")
- minAvailable := intstr.FromInt(numPods - pdbSize)
+ minAvailable := intstr.FromInt32(int32(numPods - pdbSize))
pdb := &policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "test_pdb",
@@ -1915,7 +1915,7 @@ func addKubeSystemPdbs(ctx context.Context, f *framework.Framework) error {
ginkgo.By(fmt.Sprintf("Create PodDisruptionBudget for %v", pdbData.label))
labelMap := map[string]string{"k8s-app": pdbData.label}
pdbName := fmt.Sprintf("test-pdb-for-%v", pdbData.label)
- minAvailable := intstr.FromInt(pdbData.minAvailable)
+ minAvailable := intstr.FromInt32(int32(pdbData.minAvailable))
pdb := &policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: pdbName,
diff --git a/test/e2e/common/node/container_probe.go b/test/e2e/common/node/container_probe.go
index c250ee06a9e..4434078841a 100644
--- a/test/e2e/common/node/container_probe.go
+++ b/test/e2e/common/node/container_probe.go
@@ -1196,7 +1196,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContain
ProbeHandler: v1.ProbeHandler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
- Port: intstr.FromInt(8080),
+ Port: intstr.FromInt32(8080),
},
},
InitialDelaySeconds: 10,
@@ -1660,7 +1660,7 @@ func httpGetHandler(path string, port int) v1.ProbeHandler {
return v1.ProbeHandler{
HTTPGet: &v1.HTTPGetAction{
Path: path,
- Port: intstr.FromInt(port),
+ Port: intstr.FromInt32(int32(port)),
},
}
}
@@ -1668,7 +1668,7 @@ func httpGetHandler(path string, port int) v1.ProbeHandler {
func tcpSocketHandler(port int) v1.ProbeHandler {
return v1.ProbeHandler{
TCPSocket: &v1.TCPSocketAction{
- Port: intstr.FromInt(port),
+ Port: intstr.FromInt32(int32(port)),
},
}
}
diff --git a/test/e2e/common/node/lifecycle_hook.go b/test/e2e/common/node/lifecycle_hook.go
index a562a3a00a0..eec1b977e2f 100644
--- a/test/e2e/common/node/lifecycle_hook.go
+++ b/test/e2e/common/node/lifecycle_hook.go
@@ -404,7 +404,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContain
HTTPGet: &v1.HTTPGetAction{
Path: "/echo?msg=poststart",
Host: targetIP,
- Port: intstr.FromInt(8080),
+ Port: intstr.FromInt32(8080),
},
},
}
@@ -432,7 +432,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContain
Scheme: v1.URISchemeHTTPS,
Path: "/echo?msg=poststart",
Host: targetIP,
- Port: intstr.FromInt(9090),
+ Port: intstr.FromInt32(9090),
},
},
}
@@ -459,7 +459,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContain
HTTPGet: &v1.HTTPGetAction{
Path: "/echo?msg=prestop",
Host: targetIP,
- Port: intstr.FromInt(8080),
+ Port: intstr.FromInt32(8080),
},
},
}
@@ -487,7 +487,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContain
Scheme: v1.URISchemeHTTPS,
Path: "/echo?msg=prestop",
Host: targetIP,
- Port: intstr.FromInt(9090),
+ Port: intstr.FromInt32(9090),
},
},
}
diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go
index e135275d479..1dfeda466b8 100644
--- a/test/e2e/common/util.go
+++ b/test/e2e/common/util.go
@@ -132,7 +132,7 @@ func svcByName(name string, port int) *v1.Service {
},
Ports: []v1.ServicePort{{
Port: int32(port),
- TargetPort: intstr.FromInt(port),
+ TargetPort: intstr.FromInt32(int32(port)),
}},
},
}
diff --git a/test/e2e/framework/autoscaling/autoscaling_utils.go b/test/e2e/framework/autoscaling/autoscaling_utils.go
index 632c61cf7a5..d52ce3bb07e 100644
--- a/test/e2e/framework/autoscaling/autoscaling_utils.go
+++ b/test/e2e/framework/autoscaling/autoscaling_utils.go
@@ -585,7 +585,7 @@ func createService(ctx context.Context, c clientset.Interface, name, ns string,
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: port,
- TargetPort: intstr.FromInt(targetPort),
+ TargetPort: intstr.FromInt32(int32(targetPort)),
}},
Selector: selectors,
},
diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go
index e4f733a57e6..914218f9ef2 100644
--- a/test/e2e/framework/network/utils.go
+++ b/test/e2e/framework/network/utils.go
@@ -688,8 +688,8 @@ func (config *NetworkingTestConfig) createNodePortServiceSpec(svcName string, se
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{
- {Port: ClusterHTTPPort, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(EndpointHTTPPort)},
- {Port: ClusterUDPPort, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(EndpointUDPPort)},
+ {Port: ClusterHTTPPort, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(EndpointHTTPPort)},
+ {Port: ClusterUDPPort, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt32(EndpointUDPPort)},
},
Selector: selector,
SessionAffinity: sessionAffinity,
@@ -697,7 +697,7 @@ func (config *NetworkingTestConfig) createNodePortServiceSpec(svcName string, se
}
if config.SCTPEnabled {
- res.Spec.Ports = append(res.Spec.Ports, v1.ServicePort{Port: ClusterSCTPPort, Name: "sctp", Protocol: v1.ProtocolSCTP, TargetPort: intstr.FromInt(EndpointSCTPPort)})
+ res.Spec.Ports = append(res.Spec.Ports, v1.ServicePort{Port: ClusterSCTPPort, Name: "sctp", Protocol: v1.ProtocolSCTP, TargetPort: intstr.FromInt32(EndpointSCTPPort)})
}
if config.DualStackEnabled {
requireDual := v1.IPFamilyPolicyRequireDualStack
diff --git a/test/e2e/framework/service/resource.go b/test/e2e/framework/service/resource.go
index 88b9c9e9904..663fe8c11d4 100644
--- a/test/e2e/framework/service/resource.go
+++ b/test/e2e/framework/service/resource.go
@@ -148,7 +148,7 @@ func CreateServiceForSimpleApp(ctx context.Context, c clientset.Interface, contP
return []v1.ServicePort{{
Protocol: v1.ProtocolTCP,
Port: int32(svcPort),
- TargetPort: intstr.FromInt(contPort),
+ TargetPort: intstr.FromInt32(int32(contPort)),
}}
}
framework.Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go
index a2060594964..cd3b1443995 100644
--- a/test/e2e/network/dns_common.go
+++ b/test/e2e/network/dns_common.go
@@ -222,7 +222,7 @@ func (t *dnsTestCommon) createUtilPodLabel(ctx context.Context, baseName string)
{
Protocol: v1.ProtocolTCP,
Port: servicePort,
- TargetPort: intstr.FromInt(servicePort),
+ TargetPort: intstr.FromInt32(servicePort),
},
},
},
diff --git a/test/e2e/network/util.go b/test/e2e/network/util.go
index 11c1bbe028f..6a4e3c391b5 100644
--- a/test/e2e/network/util.go
+++ b/test/e2e/network/util.go
@@ -194,7 +194,7 @@ func createSecondNodePortService(ctx context.Context, f *framework.Framework, co
Port: e2enetwork.ClusterHTTPPort,
Name: "http",
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(e2enetwork.EndpointHTTPPort),
+ TargetPort: intstr.FromInt32(e2enetwork.EndpointHTTPPort),
},
},
Selector: config.NodePortService.Spec.Selector,
diff --git a/test/integration/deployment/util.go b/test/integration/deployment/util.go
index bc9d74a7ea0..248923781fd 100644
--- a/test/integration/deployment/util.go
+++ b/test/integration/deployment/util.go
@@ -187,7 +187,7 @@ func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
}
func intOrStrP(num int) *intstr.IntOrString {
- intstr := intstr.FromInt(num)
+ intstr := intstr.FromInt32(int32(num))
return &intstr
}
diff --git a/test/integration/scheduler/preemption/preemption_test.go b/test/integration/scheduler/preemption/preemption_test.go
index d859a6152cd..c57d83247ac 100644
--- a/test/integration/scheduler/preemption/preemption_test.go
+++ b/test/integration/scheduler/preemption/preemption_test.go
@@ -1231,7 +1231,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
}
func mkMinAvailablePDB(name, namespace string, uid types.UID, minAvailable int, matchLabels map[string]string) *policy.PodDisruptionBudget {
- intMinAvailable := intstr.FromInt(minAvailable)
+ intMinAvailable := intstr.FromInt32(int32(minAvailable))
return &policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: name,
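
Note on the pattern above: intstr.FromInt takes an int and is superseded by intstr.FromInt32, so call sites that still hold an int convert explicitly with int32(...), making any narrowing visible at the call site, while literals that fit in int32 can be passed directly. A minimal self-contained sketch of the resulting behavior (the main wrapper is illustrative, not part of the patch):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// Literals within int32 range can go straight into FromInt32.
	p := intstr.FromInt32(8080)
	fmt.Println(p.String()) // "8080"

	// Parse now goes through strconv.ParseInt(val, 10, 32), so a value
	// that overflows int32 falls back to the string form instead of
	// silently wrapping around.
	fmt.Println(intstr.Parse("8080").Type == intstr.Int)          // true
	fmt.Println(intstr.Parse("5000000000").Type == intstr.String) // true
}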
|
40
|
update to golangci-lint v1.54.1 + go-ruleguard v0.4.0
That release is the first one with official support for Go 1.21. go-ruleguard
must be >= 0.3.20 because of
https://github.com/quasilyte/go-ruleguard/issues/449, which affects Go 1.21.
golangci-lint itself doesn't depend on a recent enough go-ruleguard release
yet, so the bump was done manually.
|
2023-08-18
| null |
index edef998ea4a..d4661e38b2d 100644
--- a/hack/tools/go.mod
+++ b/hack/tools/go.mod
@@ -7,7 +7,7 @@ require (
github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c
github.com/client9/misspell v0.3.4
github.com/golang/mock v1.6.0
- github.com/golangci/golangci-lint v1.53.3
+ github.com/golangci/golangci-lint v1.54.1
github.com/google/go-flow-levee v0.1.5
go.uber.org/automaxprocs v1.5.2
gotest.tools/gotestsum v1.6.4
@@ -18,19 +18,19 @@ require (
require (
4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
4d63.com/gochecknoglobals v0.2.1 // indirect
- github.com/4meepo/tagalign v1.2.2 // indirect
- github.com/Abirdcfly/dupword v0.0.11 // indirect
+ github.com/4meepo/tagalign v1.3.2 // indirect
+ github.com/Abirdcfly/dupword v0.0.12 // indirect
github.com/Antonboom/errname v0.1.10 // indirect
github.com/Antonboom/nilnil v0.1.5 // indirect
github.com/BurntSushi/toml v1.3.2 // indirect
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
- github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect
+ github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect
github.com/alexkohler/nakedret/v2 v2.0.2 // indirect
github.com/alexkohler/prealloc v1.0.0 // indirect
github.com/alingse/asasalint v0.0.11 // indirect
- github.com/ashanbrown/forbidigo v1.5.3 // indirect
+ github.com/ashanbrown/forbidigo v1.6.0 // indirect
github.com/ashanbrown/makezero v1.1.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bkielbasa/cyclop v1.2.1 // indirect
@@ -44,7 +44,7 @@ require (
github.com/charithe/durationcheck v0.0.10 // indirect
github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect
github.com/curioswitch/go-reassign v0.2.0 // indirect
- github.com/daixiang0/gci v0.10.1 // indirect
+ github.com/daixiang0/gci v0.11.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/denis-tingaikin/go-header v0.4.3 // indirect
github.com/dnephin/pflag v1.0.7 // indirect
@@ -55,7 +55,7 @@ require (
github.com/firefart/nonamedreturns v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/fzipp/gocyclo v0.6.0 // indirect
- github.com/go-critic/go-critic v0.8.1 // indirect
+ github.com/go-critic/go-critic v0.9.0 // indirect
github.com/go-toolsmith/astcast v1.1.0 // indirect
github.com/go-toolsmith/astcopy v1.1.0 // indirect
github.com/go-toolsmith/astequal v1.1.0 // indirect
@@ -73,7 +73,7 @@ require (
github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
- github.com/golangci/misspell v0.4.0 // indirect
+ github.com/golangci/misspell v0.4.1 // indirect
github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
github.com/google/go-cmp v0.5.9 // indirect
@@ -98,7 +98,7 @@ require (
github.com/kisielk/gotool v1.0.0 // indirect
github.com/kkHAIKE/contextcheck v1.1.4 // indirect
github.com/kulti/thelper v0.6.3 // indirect
- github.com/kunwardeep/paralleltest v1.0.7 // indirect
+ github.com/kunwardeep/paralleltest v1.0.8 // indirect
github.com/kyoh86/exportloopref v0.1.11 // indirect
github.com/ldez/gomoddirectives v0.2.3 // indirect
github.com/ldez/tagliatelle v0.5.0 // indirect
@@ -121,18 +121,18 @@ require (
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
github.com/nishanths/exhaustive v0.11.0 // indirect
github.com/nishanths/predeclared v0.2.2 // indirect
- github.com/nunnatsa/ginkgolinter v0.12.1 // indirect
+ github.com/nunnatsa/ginkgolinter v0.13.3 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/polyfloyd/go-errorlint v1.4.2 // indirect
+ github.com/polyfloyd/go-errorlint v1.4.3 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
- github.com/quasilyte/go-ruleguard v0.3.19 // indirect
+ github.com/quasilyte/go-ruleguard v0.4.0 // indirect
github.com/quasilyte/gogrep v0.5.0 // indirect
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
@@ -167,27 +167,27 @@ require (
github.com/timonwong/loggercheck v0.9.4 // indirect
github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect
github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
- github.com/ultraware/funlen v0.0.3 // indirect
+ github.com/ultraware/funlen v0.1.0 // indirect
github.com/ultraware/whitespace v0.0.5 // indirect
- github.com/uudashr/gocognit v1.0.6 // indirect
+ github.com/uudashr/gocognit v1.0.7 // indirect
github.com/xen0n/gosmopolitan v1.2.1 // indirect
github.com/yagipy/maintidx v1.0.0 // indirect
github.com/yeya24/promlinter v0.2.0 // indirect
- github.com/ykadowak/zerologlint v0.1.2 // indirect
- gitlab.com/bosi/decorder v0.2.3 // indirect
- go.tmz.dev/musttag v0.7.0 // indirect
+ github.com/ykadowak/zerologlint v0.1.3 // indirect
+ gitlab.com/bosi/decorder v0.4.0 // indirect
+ go.tmz.dev/musttag v0.7.1 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.24.0 // indirect
- golang.org/x/crypto v0.9.0 // indirect
+ golang.org/x/crypto v0.11.0 // indirect
golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect
- golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 // indirect
- golang.org/x/mod v0.10.0 // indirect
- golang.org/x/sync v0.2.0 // indirect
- golang.org/x/sys v0.8.0 // indirect
- golang.org/x/term v0.8.0 // indirect
- golang.org/x/text v0.9.0 // indirect
- golang.org/x/tools v0.9.3 // indirect
+ golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect
+ golang.org/x/mod v0.12.0 // indirect
+ golang.org/x/sync v0.3.0 // indirect
+ golang.org/x/sys v0.11.0 // indirect
+ golang.org/x/term v0.10.0 // indirect
+ golang.org/x/text v0.11.0 // indirect
+ golang.org/x/tools v0.12.0 // indirect
golang.org/x/tools/go/pointer v0.1.0-deprecated // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
diff --git a/hack/tools/go.sum b/hack/tools/go.sum
index e757b513e37..4e63a9b4318 100644
--- a/hack/tools/go.sum
+++ b/hack/tools/go.sum
@@ -41,10 +41,10 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/4meepo/tagalign v1.2.2 h1:kQeUTkFTaBRtd/7jm8OKJl9iHk0gAO+TDFPHGSna0aw=
-github.com/4meepo/tagalign v1.2.2/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE=
-github.com/Abirdcfly/dupword v0.0.11 h1:z6v8rMETchZXUIuHxYNmlUAuKuB21PeaSymTed16wgU=
-github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA=
+github.com/4meepo/tagalign v1.3.2 h1:1idD3yxlRGV18VjqtDbqYvQ5pXqQS0wO2dn6M3XstvI=
+github.com/4meepo/tagalign v1.3.2/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE=
+github.com/Abirdcfly/dupword v0.0.12 h1:56NnOyrXzChj07BDFjeRA+IUzSz01jmzEq+G4kEgFhc=
+github.com/Abirdcfly/dupword v0.0.12/go.mod h1:+us/TGct/nI9Ndcbcp3rgNcQzctTj68pq7TcgNpLfdI=
github.com/Antonboom/errname v0.1.10 h1:RZ7cYo/GuZqjr1nuJLNe8ZH+a+Jd9DaZzttWzak9Bls=
github.com/Antonboom/errname v0.1.10/go.mod h1:xLeiCIrvVNpUtsN0wxAh05bNIZpqE22/qDMnTBTttiA=
github.com/Antonboom/nilnil v0.1.5 h1:X2JAdEVcbPaOom2TUa1FxZ3uyuUlex0XMLGYMemu6l0=
@@ -55,8 +55,8 @@ github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
-github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 h1:+r1rSv4gvYn0wmRjC8X7IAzX8QezqtFV9m0MUHFJgts=
-github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0=
+github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 h1:3ZBs7LAezy8gh0uECsA6CGU43FF3zsx5f4eah5FxTMA=
+github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0/go.mod h1:rZLTje5A9kFBe0pzhpe2TdhRniBF++PRHQuRpR8esVc=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY=
@@ -74,8 +74,8 @@ github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQ
github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I=
github.com/aojea/sloppy-netparser v0.0.0-20210819225411-1b3bd8b3b975 h1:3bpBhtHNVCpJiyO1r7w0BjGhQPPk2eD1ZsVAVS5vmiE=
github.com/aojea/sloppy-netparser v0.0.0-20210819225411-1b3bd8b3b975/go.mod h1:VP81Qd6FKAazakPswOou8ULXGU/j5QH0VcGPzehHx3s=
-github.com/ashanbrown/forbidigo v1.5.3 h1:jfg+fkm/snMx+V9FBwsl1d340BV/99kZGv5jN9hBoXk=
-github.com/ashanbrown/forbidigo v1.5.3/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
+github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY=
+github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -118,8 +118,8 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
-github.com/daixiang0/gci v0.10.1 h1:eheNA3ljF6SxnPD/vE4lCBusVHmV3Rs3dkKvFrJ7MR0=
-github.com/daixiang0/gci v0.10.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
+github.com/daixiang0/gci v0.11.0 h1:XeQbFKkCRxvVyn06EOuNY6LPGBLVuB/W130c8FrnX6A=
+github.com/daixiang0/gci v0.11.0/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -150,8 +150,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
-github.com/go-critic/go-critic v0.8.1 h1:16omCF1gN3gTzt4j4J6fKI/HnRojhEp+Eks6EuKw3vw=
-github.com/go-critic/go-critic v0.8.1/go.mod h1:kpzXl09SIJX1cr9TB/g/sAG+eFEl7ZS9f9cqvZtyNl0=
+github.com/go-critic/go-critic v0.9.0 h1:Pmys9qvU3pSML/3GEQ2Xd9RZ/ip+aXHKILuxczKGV/U=
+github.com/go-critic/go-critic v0.9.0/go.mod h1:5P8tdXL7m/6qnyG6oRAlYLORvoXH0WDypYgAEmagT40=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -227,14 +227,14 @@ github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6
github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY=
github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs=
-github.com/golangci/golangci-lint v1.53.3 h1:CUcRafczT4t1F+mvdkUm6KuOpxUZTl0yWN/rSU6sSMo=
-github.com/golangci/golangci-lint v1.53.3/go.mod h1:W4Gg3ONq6p3Jl+0s/h9Gr0j7yEgHJWWZO2bHl2tBUXM=
+github.com/golangci/golangci-lint v1.54.1 h1:0qMrH1gkeIBqCZaaAm5Fwq4xys9rO/lJofHfZURIFFk=
+github.com/golangci/golangci-lint v1.54.1/go.mod h1:JK47+qksV/t2mAz9YvndwT0ZLW4A1rvDljOs3g9jblo=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
-github.com/golangci/misspell v0.4.0 h1:KtVB/hTK4bbL/S6bs64rYyk8adjmh1BygbBiaAiX+a0=
-github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc=
+github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g=
+github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI=
github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ=
github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
@@ -345,8 +345,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs=
github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
-github.com/kunwardeep/paralleltest v1.0.7 h1:2uCk94js0+nVNQoHZNLBkAR1DQJrVzw6T0RMzJn55dQ=
-github.com/kunwardeep/paralleltest v1.0.7/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY=
+github.com/kunwardeep/paralleltest v1.0.8 h1:Ul2KsqtzFxTlSU7IP0JusWlLiNqQaloB9vguyjbE558=
+github.com/kunwardeep/paralleltest v1.0.8/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY=
github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ=
github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA=
@@ -404,8 +404,8 @@ github.com/nishanths/exhaustive v0.11.0 h1:T3I8nUGhl/Cwu5Z2hfc92l0e04D2GEW6e0l8p
github.com/nishanths/exhaustive v0.11.0/go.mod h1:RqwDsZ1xY0dNdqHho2z6X+bgzizwbLYOWnZbbl2wLB4=
github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
-github.com/nunnatsa/ginkgolinter v0.12.1 h1:vwOqb5Nu05OikTXqhvLdHCGcx5uthIYIl0t79UVrERQ=
-github.com/nunnatsa/ginkgolinter v0.12.1/go.mod h1:AK8Ab1PypVrcGUusuKD8RDcl2KgsIwvNaaxAlyHSzso=
+github.com/nunnatsa/ginkgolinter v0.13.3 h1:wEvjrzSMfDdnoWkctignX9QTf4rT9f4GkQ3uVoXBmiU=
+github.com/nunnatsa/ginkgolinter v0.13.3/go.mod h1:aTKXo8WddENYxNEFT+4ZxEgWXqlD9uMD3w9Bfw/ABEc=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
@@ -427,8 +427,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/polyfloyd/go-errorlint v1.4.2 h1:CU+O4181IxFDdPH6t/HT7IiDj1I7zxNi1RIUxYwn8d0=
-github.com/polyfloyd/go-errorlint v1.4.2/go.mod h1:k6fU/+fQe38ednoZS51T7gSIGQW1y94d6TkSr35OzH8=
+github.com/polyfloyd/go-errorlint v1.4.3 h1:P6NALOLV8BrWhm6PsqOraUK05E5h8IZnpXYJ+CIg+0U=
+github.com/polyfloyd/go-errorlint v1.4.3/go.mod h1:VPlWPh6hB/wruVG803SuNpLuTGNjLHYlvcdSy4RhdPA=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
@@ -452,8 +452,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/quasilyte/go-ruleguard v0.3.19 h1:tfMnabXle/HzOb5Xe9CUZYWXKfkS1KwRmZyPmD9nVcc=
-github.com/quasilyte/go-ruleguard v0.3.19/go.mod h1:lHSn69Scl48I7Gt9cX3VrbsZYvYiBYszZOZW4A+oTEw=
+github.com/quasilyte/go-ruleguard v0.4.0 h1:DyM6r+TKL+xbKB4Nm7Afd1IQh9kEUKQs2pboWGKtvQo=
+github.com/quasilyte/go-ruleguard v0.4.0/go.mod h1:Eu76Z/R8IXtViWUIHkE3p8gdH3/PKk1eh3YGfaEof10=
github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo=
github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng=
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU=
@@ -548,20 +548,20 @@ github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQp
github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE=
github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
-github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA=
-github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
+github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI=
+github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4=
github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI=
github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
-github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y=
-github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
+github.com/uudashr/gocognit v1.0.7 h1:e9aFXgKgUJrQ5+bs61zBigmj7bFJ/5cC6HmMahVzuDo=
+github.com/uudashr/gocognit v1.0.7/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
github.com/xen0n/gosmopolitan v1.2.1 h1:3pttnTuFumELBRSh+KQs1zcz4fN6Zy7aB0xlnQSn1Iw=
github.com/xen0n/gosmopolitan v1.2.1/go.mod h1:JsHq/Brs1o050OOdmzHeOr0N7OtlnKRAGAsElF8xBQA=
github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=
github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o=
github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA=
-github.com/ykadowak/zerologlint v0.1.2 h1:Um4P5RMmelfjQqQJKtE8ZW+dLZrXrENeIzWWKw800U4=
-github.com/ykadowak/zerologlint v0.1.2/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg=
+github.com/ykadowak/zerologlint v0.1.3 h1:TLy1dTW3Nuc+YE3bYRPToG1Q9Ej78b5UUN6bjbGdxPE=
+github.com/ykadowak/zerologlint v0.1.3/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -569,8 +569,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0=
-gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE=
+gitlab.com/bosi/decorder v0.4.0 h1:HWuxAhSxIvsITcXeP+iIRg9d1cVfvVkmlF7M68GaoDY=
+gitlab.com/bosi/decorder v0.4.0/go.mod h1:xarnteyUoJiOTEldDysquWKTVDCKo2TOIOIibSuWqOg=
go-simpler.org/assert v0.5.0 h1:+5L/lajuQtzmbtEfh69sr5cRf2/xZzyJhFjoOz/PPqs=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -578,8 +578,8 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.tmz.dev/musttag v0.7.0 h1:QfytzjTWGXZmChoX0L++7uQN+yRCPfyFm+whsM+lfGc=
-go.tmz.dev/musttag v0.7.0/go.mod h1:oTFPvgOkJmp5kYL02S8+jrH0eLrBIl57rzWeA26zDEM=
+go.tmz.dev/musttag v0.7.1 h1:9lFmeSFnFfPuMq4IksHGomItE6NgKMNW2Nt2FPOhCfU=
+go.tmz.dev/musttag v0.7.1/go.mod h1:oJLkpR56EsIryktZJk/B0IroSMi37YWver47fibGh5U=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
@@ -599,8 +599,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
-golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
+golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
+golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -617,8 +617,8 @@ golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQ
golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
-golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 h1:J74nGeMgeFnYQJN59eFwh06jX/V8g0lB7LWpjSLxtgU=
-golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 h1:jWGQJV4niP+CCmFW9ekjA9Zx8vYORzOUH2/Nl5WPuLQ=
+golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -652,8 +652,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
-golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -695,7 +695,7 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
+golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -719,8 +719,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
-golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -781,16 +781,16 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
-golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
-golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
+golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -802,8 +802,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
+golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -878,8 +878,8 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
-golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
+golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
golang.org/x/tools/go/pointer v0.1.0-deprecated h1:PwCkqv2FT35Z4MVxR/tUlvLoL0TkxDjShpBrE4p18Ho=
golang.org/x/tools/go/pointer v0.1.0-deprecated/go.mod h1:Jd+I2inNruJ+5VRdS+jU4S1t17z5y+UCCRa/eBRwilA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/hack/verify-golangci-lint.sh b/hack/verify-golangci-lint.sh
index 7dd154ea05f..fa92a7edb91 100755
--- a/hack/verify-golangci-lint.sh
+++ b/hack/verify-golangci-lint.sh
@@ -48,6 +48,12 @@ PATH="${GOBIN}:${PATH}"
invocation=(./hack/verify-golangci-lint.sh "$@")
+# Disable warnings about the logcheck plugin using the old API
+# (https://github.com/golangci/golangci-lint/issues/4001).
+# Can be removed once logcheck gets updated to a newer release
+# which uses the new plugin API
+export GOLANGCI_LINT_HIDE_WARNING_ABOUT_PLUGIN_API_DEPRECATION=1
+
# The logcheck plugin currently has to be configured via env variables
# (https://github.com/golangci/golangci-lint/issues/1512).
#
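
For orientation, go-ruleguard (pulled in here via go-critic) checks code against rules written in a small Go DSL; a rule file looks roughly like this (the rule itself is illustrative, not one shipped in this repo):

package gorules

import "github.com/quasilyte/go-ruleguard/dsl"

// sprintfConcat flags a Sprintf call that is just string concatenation.
func sprintfConcat(m dsl.Matcher) {
	m.Match(`fmt.Sprintf("%s%s", $a, $b)`).
		Report(`consider $a + $b instead of fmt.Sprintf`)
}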
|
1
|
Merge pull request #119937 from RyanAoh/kep-1860-dev
Make Kubernetes aware of the LoadBalancer behaviour
|
2023-08-18
| null | null |
2
|
Merge pull request #120025 from tzneal/remove-legacy-test-dependency
remove the legacy test dependency
|
2023-08-18
| null | null |
3
|
add aramase to sig-auth-encryption-at-rest-reviewers
Signed-off-by: Anish Ramasekar <[email protected]>
|
2023-08-18
| null |
index def06e4af8b..3c63423b183 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -63,6 +63,7 @@ aliases:
- smarterclayton
- enj
sig-auth-encryption-at-rest-reviewers:
+ - aramase
- enj
- lavalamp
- liggitt
|
4
|
Merge pull request #119874 from kannon92/pod-replacement-policy-typos
fix typos for pod replacement policy
|
2023-08-18
| null | null |
5
|
Merge pull request #119806 from enj/enj/i/delete_psp_api
Delete PSP API types and generated clients
|
2023-08-18
| null | null |
6
|
remove the legacy test dependency
This removes an import that pulled in a number of
apparently old, failing tests.
|
2023-08-18
| null |
index 417ef729b81..b1828a583ed 100644
--- a/test/e2e_node/seccompdefault_test.go
+++ b/test/e2e_node/seccompdefault_test.go
@@ -27,13 +27,19 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
+ admissionapi "k8s.io/pod-security-admission/api"
+
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
- "k8s.io/kubernetes/test/e2e/node"
- admissionapi "k8s.io/pod-security-admission/api"
)
+// SeccompProcStatusField is the field of /proc/$PID/status referencing the seccomp filter type.
+const SeccompProcStatusField = "Seccomp:"
+
+// ProcSelfStatusPath is the path to /proc/self/status.
+const ProcSelfStatusPath = "/proc/self/status"
+
// Serial because the test updates kubelet configuration.
var _ = SIGDescribe("SeccompDefault [Serial] [Feature:SeccompDefault] [LinuxOnly]", func() {
f := framework.NewDefaultFramework("seccompdefault-test")
@@ -54,7 +60,7 @@ var _ = SIGDescribe("SeccompDefault [Serial] [Feature:SeccompDefault] [LinuxOnly
{
Name: name,
Image: busyboxImage,
- Command: []string{"grep", node.SeccompProcStatusField, node.ProcSelfStatusPath},
+ Command: []string{"grep", SeccompProcStatusField, ProcSelfStatusPath},
SecurityContext: securityContext,
},
},
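
The "grep Seccomp: /proc/self/status" command the test runs inspects the kernel's per-process status file, where the Seccomp field is 0 (disabled), 1 (strict), or 2 (filter). A minimal sketch of the same check done natively in Go (Linux-only; the helper name is hypothetical, not part of the patch):

package main

import (
	"bufio"
	"fmt"
	"os"
	"strings"
)

// seccompMode returns the value of the "Seccomp:" field from
// /proc/self/status: "0" (disabled), "1" (strict), or "2" (filter).
func seccompMode() (string, error) {
	f, err := os.Open("/proc/self/status")
	if err != nil {
		return "", err
	}
	defer f.Close()
	s := bufio.NewScanner(f)
	for s.Scan() {
		if v, ok := strings.CutPrefix(s.Text(), "Seccomp:"); ok {
			return strings.TrimSpace(v), nil
		}
	}
	if err := s.Err(); err != nil {
		return "", err
	}
	return "", fmt.Errorf("Seccomp field not found in /proc/self/status")
}

func main() {
	mode, err := seccompMode()
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("seccomp mode:", mode)
}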
|
7
|
Merge pull request #119027 from MadhavJivrajani/go1.21-list-e
[prep for go1.21]: use -e in `go list`
|
2023-08-18
| null | null |
8
|
Merge pull request #119746 from SataQiu/cleanup-scheduler-20230803
using wait.PollUntilContextTimeout instead of deprecated wait.Poll for scheduler
|
2023-08-18
| null | null |
9
|
Merge pull request #119958 from SataQiu/drop-126-api-testdata
Drop v1.26.0 API testdata
|
2023-08-18
| null | null |
10
|
prep for go1.21: use -e in go list
For some reason, in go1.21, go list does not allow
importing main packages anymore, even if it is for
the sake of tracking dependencies (which is a valid
use case).
A suggestion to work around this is to use the -e flag to
permit processing of erroneous packages. However, this
doesn't seem prudent.
Signed-off-by: Madhav Jivrajani <[email protected]>
|
2023-08-18
| null |
index 5977037cc4e..f1c46982fa0 100755
--- a/hack/update-vendor.sh
+++ b/hack/update-vendor.sh
@@ -277,7 +277,7 @@ while IFS= read -r repo; do
echo "=== computing imports for ${repo}"
go list all
echo "=== computing tools imports for ${repo}"
- go list -tags=tools all
+ go list -e -tags=tools all
}
# capture module dependencies
|
11
|
Merge pull request #119728 from pohly/ssa-forbid-extract-calls
SSA: prevent usage of Extract calls via forbidigo
|
2023-08-18
| null | null |
12
|
Merge pull request #119489 from carlory/cleanup-e2e-common-framework-equal
e2e_common: stop using deprecated framework.ExpectEqual
|
2023-08-18
| null | null |
14
|
Merge pull request #119982 from liggitt/automated-cherry-pick-of-#119977-upstream-release-1.28
Automated cherry pick of #119977: Make gomaxprocs install optional, limit to tests
|
2023-08-18
| null | null |
15
|
Merge pull request #119562 from my-git9/proxyut
kubeadm: increase ut for cmd/kubeadm/app/phases/addons/proxy
|
2023-08-18
| null | null |
16
|
Merge pull request #119501 from Songjoy/cleanup-e2e-node-framework-equal
e2e_node: stop using deprecated framework.ExpectEqual
|
2023-08-18
| null | null |
17
|
Merge pull request #119097 from pacoxu/fix-eviction-pid
PIDPressure condition is triggered slow on CRI-O with large PID pressure/heavy load
|
2023-08-18
| null | null |
18
|
Merge pull request #119800 from jpbetz/cost-fix
Fixes CEL estimated cost to propagate result sizes correctly
|
2023-08-18
| null | null |
19
|
Merge pull request #119197 from saschagrunert/stop-container-runtime-err
Check dbus error on container runtime start/stop
|
2023-08-18
| null | null |
20
|
Merge pull request #119974 from tzneal/bump-busybox-test-version
bump the busybox test version to resolve test failures
|
2023-08-18
| null | null |
21
|
Merge pull request #119939 from dims/kubectl-lookup-host-in-kubeconfig-when-needed
[kubectl] Lookup Host from kubeconfig when needed
|
2023-08-18
| null | null |
22
|
Merge pull request #119880 from saschagrunert/seccomp-filter
Make seccomp status checks in e2e tests more robust
|
2023-08-18
| null | null |
23
|
Merge pull request #119860 from pohly/golangci-lint-update
golangci-lint update and support for Go 1.21
|
2023-08-18
| null | null |
24
|
Merge pull request #119966 from aojea/fixfix
e2e framework util: fix subtle bug when checking endpoints
|
2023-08-18
| null | null |
26
|
Merge pull request #119745 from tsmetana/fix-local-stress-flake
Local PV Stress test: don't fail on deleting missing PV
|
2023-08-18
| null | null |
27
|
Merge pull request #119654 from p0lyn0mial/upstream-watch-list-e2e-panic
e2e/apimachinery/watchlist: stop panicking when run against unsupported cluster/environment
|
2023-08-18
| null | null |
28
|
Merge pull request #119509 from tzneal/fix-describe-node-with-sidecars
kubectl: fix describe node output when sidecars are present
|
2023-08-18
| null | null |
29
|
Merge pull request #118619 from TommyStarK/gh_113832
dynamic resource allocation: reuse gRPC connection
|
2023-08-18
| null | null |
30
|
Merge pull request #119977 from liggitt/optional-gomaxprocs
Make gomaxprocs install optional, limit to tests
|
2023-08-18
| null | null |
31
|
Make gomaxprocs install optional, limit to tests
|
2023-08-18
| null |
index 66772f08a81..983ff368e25 100755
--- a/hack/lib/golang.sh
+++ b/hack/lib/golang.sh
@@ -556,20 +556,25 @@ kube::golang::setup_env() {
# This seems to matter to some tools
export GO15VENDOREXPERIMENT=1
+}
+kube::golang::setup_gomaxprocs() {
# GOMAXPROCS by default does not reflect the number of cpu(s) available
# when running in a container, please see https://github.com/golang/go/issues/33803
- if ! command -v ncpu >/dev/null 2>&1; then
- # shellcheck disable=SC2164
- pushd "${KUBE_ROOT}/hack/tools" >/dev/null
- GO111MODULE=on go install ./ncpu
- # shellcheck disable=SC2164
- popd >/dev/null
+ if [[ -z "${GOMAXPROCS:-}" ]]; then
+ if ! command -v ncpu >/dev/null 2>&1; then
+ # shellcheck disable=SC2164
+ pushd "${KUBE_ROOT}/hack/tools" >/dev/null
+ GO111MODULE=on go install ./ncpu || echo "Will not automatically set GOMAXPROCS"
+ # shellcheck disable=SC2164
+ popd >/dev/null
+ fi
+ if command -v ncpu >/dev/null 2>&1; then
+ GOMAXPROCS=$(ncpu)
+ export GOMAXPROCS
+ kube::log::status "Set GOMAXPROCS automatically to ${GOMAXPROCS}"
+ fi
fi
-
- GOMAXPROCS=${GOMAXPROCS:-$(ncpu)}
- export GOMAXPROCS
- kube::log::status "Setting GOMAXPROCS: ${GOMAXPROCS}"
}
# This will take binaries from $GOPATH/bin and copy them to the appropriate
diff --git a/hack/make-rules/test-e2e-node.sh b/hack/make-rules/test-e2e-node.sh
index 43dde0c740f..49e3e04ac71 100755
--- a/hack/make-rules/test-e2e-node.sh
+++ b/hack/make-rules/test-e2e-node.sh
@@ -18,6 +18,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
+kube::golang::setup_gomaxprocs
# start the cache mutation detector by default so that cache mutators will be found
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
diff --git a/hack/make-rules/test.sh b/hack/make-rules/test.sh
index e9074678a8f..4aa72730d83 100755
--- a/hack/make-rules/test.sh
+++ b/hack/make-rules/test.sh
@@ -22,6 +22,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
+kube::golang::setup_gomaxprocs
# start the cache mutation detector by default so that cache mutators will be found
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
|
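For context on why the scripts go out of their way to set GOMAXPROCS: the Go runtime defaults it to the host CPU count rather than the container's CPU quota (golang/go#33803), but it does honor a GOMAXPROCS environment variable. A minimal sketch of what the tests end up observing (the container-quota remark is the motivation from the issue, not something this program detects):

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Without a GOMAXPROCS env var this reports the host CPU count, even in
	// a CPU-limited container; running with GOMAXPROCS=4 would report 4.
	fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
}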
32
|
bump the busybox test version to resolve test failures
- bump busybox version
- specify the path to /bin/sleep to avoid calling a new shell builtin
|
2023-08-18
| null |
index 3dc786c1ee2..1043cad658c 100644
--- a/test/e2e_node/pods_lifecycle_termination_test.go
+++ b/test/e2e_node/pods_lifecycle_termination_test.go
@@ -113,7 +113,7 @@ func getSigkillTargetPod(podName string, ctnName string) *v1.Pod {
Command: []string{
"sh",
"-c",
- "trap \\"echo SIGTERM caught\\" SIGTERM SIGINT; touch /tmp/healthy; sleep 1000",
+ "trap \\"echo SIGTERM caught\\" SIGTERM SIGINT; touch /tmp/healthy; /bin/sleep 1000",
},
// Using readiness probe to guarantee signal handler registering finished
ReadinessProbe: &v1.Probe{
diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go
index 578a0c0f4b4..d28ab6a46bb 100644
--- a/test/utils/image/manifest.go
+++ b/test/utils/image/manifest.go
@@ -238,7 +238,7 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.17.7"}
configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"}
- configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-4"}
+ configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"}
configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.7"}
|
33
|
Make gomaxprocs install optional, limit to tests
|
2023-08-18
| null |
index 66772f08a81..983ff368e25 100755
--- a/hack/lib/golang.sh
+++ b/hack/lib/golang.sh
@@ -556,20 +556,25 @@ kube::golang::setup_env() {
# This seems to matter to some tools
export GO15VENDOREXPERIMENT=1
+}
+kube::golang::setup_gomaxprocs() {
# GOMAXPROCS by default does not reflect the number of cpu(s) available
# when running in a container, please see https://github.com/golang/go/issues/33803
- if ! command -v ncpu >/dev/null 2>&1; then
- # shellcheck disable=SC2164
- pushd "${KUBE_ROOT}/hack/tools" >/dev/null
- GO111MODULE=on go install ./ncpu
- # shellcheck disable=SC2164
- popd >/dev/null
+ if [[ -z "${GOMAXPROCS:-}" ]]; then
+ if ! command -v ncpu >/dev/null 2>&1; then
+ # shellcheck disable=SC2164
+ pushd "${KUBE_ROOT}/hack/tools" >/dev/null
+ GO111MODULE=on go install ./ncpu || echo "Will not automatically set GOMAXPROCS"
+ # shellcheck disable=SC2164
+ popd >/dev/null
+ fi
+ if command -v ncpu >/dev/null 2>&1; then
+ GOMAXPROCS=$(ncpu)
+ export GOMAXPROCS
+ kube::log::status "Set GOMAXPROCS automatically to ${GOMAXPROCS}"
+ fi
fi
-
- GOMAXPROCS=${GOMAXPROCS:-$(ncpu)}
- export GOMAXPROCS
- kube::log::status "Setting GOMAXPROCS: ${GOMAXPROCS}"
}
# This will take binaries from $GOPATH/bin and copy them to the appropriate
diff --git a/hack/make-rules/test-e2e-node.sh b/hack/make-rules/test-e2e-node.sh
index 43dde0c740f..49e3e04ac71 100755
--- a/hack/make-rules/test-e2e-node.sh
+++ b/hack/make-rules/test-e2e-node.sh
@@ -18,6 +18,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
+kube::golang::setup_gomaxprocs
# start the cache mutation detector by default so that cache mutators will be found
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
diff --git a/hack/make-rules/test.sh b/hack/make-rules/test.sh
index e9074678a8f..4aa72730d83 100755
--- a/hack/make-rules/test.sh
+++ b/hack/make-rules/test.sh
@@ -22,6 +22,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
+kube::golang::setup_gomaxprocs
# start the cache mutation detector by default so that cache mutators will be found
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
|
34
|
Merge pull request #119709 from charles-chenzz/fix_flaky
fix flaky test on dra TestPrepareResources/should_timeout
|
2023-08-18
| null | null |
35
|
Merge pull request #119819 from pohly/dra-performance-test-driver
dra test: enhance performance of test driver controller
|
2023-08-18
| null | null |
36
|
Merge pull request #119938 from my-git9/certlist-ut
kubeadm: increase ut for certs/certlist
|
2023-08-18
| null | null |
37
|
Merge pull request #119859 from SataQiu/fix-healthcheck-client-cert-20230809
kubeadm: remove 'system:masters' organization from etcd/healthcheck-client certificate
|
2023-08-18
| null | null |
38
|
e2e framework util: fix subtle bug when checking endpoints
Change-Id: Ied14efcb75a45e3bbd5f76d4ee4c89703161df54
|
2023-08-18
| null |
index 407c9e60d79..f10e3254c01 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -478,7 +478,7 @@ func isIPv6Endpoint(e *v1.Endpoints) bool {
continue
}
// Endpoints are single family, so it is enough to check only one address
- return netutils.IsIPv6String(sub.Addresses[0].IP)
+ return netutils.IsIPv6String(addr.IP)
}
}
// default to IPv4 for an Endpoint without IP addresses
|
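The subtlety in the fix above: the loop already skips entries with an empty IP via the loop variable addr, but the old code then inspected sub.Addresses[0].IP, which can be exactly the empty entry that was skipped. A self-contained sketch of the corrected shape (address and subset are simplified stand-ins for the v1.Endpoints types, not the real API structs):

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

type address struct{ IP string }
type subset struct{ Addresses []address }

func isIPv6Endpoint(subsets []subset) bool {
	for _, sub := range subsets {
		for _, addr := range sub.Addresses {
			if addr.IP == "" {
				continue
			}
			// Endpoints are single-family, so one non-empty address decides;
			// checking addr (not Addresses[0]) is the bug fix.
			return netutils.IsIPv6String(addr.IP)
		}
	}
	return false // default to IPv4 for an Endpoint without IP addresses
}

func main() {
	subsets := []subset{{Addresses: []address{{IP: ""}, {IP: "2001:db8::1"}}}}
	fmt.Println(isIPv6Endpoint(subsets)) // true
}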
39
|
Merge pull request #119928 from aojea/fixe2e
e2e WaitForServiceEndpointsNum take into account the address family
|
2023-08-18
| null | null |
41
|
Merge pull request #119914 from luohaha3123/job-feature
Job: Change job controller method receivers to pointers
|
2023-08-18
| null | null |
42
|
Merge pull request #119907 from Hii-Arpit/Hii-Arpit-Fixing-Broken-Link
Fixing the "Service Account Token" link in the readme
|
2023-08-18
| null | null |
43
|
Merge pull request #119904 from tenzen-y/replace-deprecated-workqueue-lib
Job: Replace deprecated workqueue function with supported one
|
2023-08-18
| null | null |
44
|
Merge pull request #119890 from tzneal/containers-lifecycle-flake
crio: increase test buffer to eliminate test flakes
|
2023-08-18
| null | null |
45
|
Merge pull request #119844 from enj/enj/i/upgrade_regex
wsstream: use a single approach to detect connection upgrade
|
2023-08-18
| null | null |
46
|
Merge pull request #119825 from Jefftree/add-gv
Move adding GroupVersion log until after an update is confirmed
|
2023-08-18
| null | null |
47
|
Merge pull request #119796 from sttts/sttts-caches-populated
client-go: log proper 'caches populated' message, with type and source and only once
|
2023-08-18
| null | null |
48
|
Merge pull request #119795 from sttts/sttts-httplog-impersonation
apiserver/httplog: pretty up impersonation output
|
2023-08-18
| null | null |
49
|
Merge pull request #119794 from aojea/jsonlogkube
implement Stringer for podActions
|
2023-08-18
| null | null |
60
|
kubeadm: restore and generalize the TestMigrateOldConfig test
The test required two APIs to be available to test for migration.
Keep it simple and use a variable "gv" at the top of the function body
to easily swap the version to be tested once an old API is deleted.
e.g. currently v1beta3 is the "old" API, v1beta4 is the "new" one.
Ultimately, this test only makes sure that the expected kinds are
available post migration.
|
2023-08-18
| null |
index c713984b22a..879d04aef7f 100644
--- a/cmd/kubeadm/app/util/config/common_test.go
+++ b/cmd/kubeadm/app/util/config/common_test.go
@@ -21,13 +21,16 @@ import (
"reflect"
"testing"
+ "github.com/lithammer/dedent"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/version"
apimachineryversion "k8s.io/apimachinery/pkg/version"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
- kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
+ kubeadmapiv1old "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
+ kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta4"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
+ kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
)
const KubeadmGroupName = "kubeadm.k8s.io"
@@ -213,209 +216,228 @@ func TestVerifyAPIServerBindAddress(t *testing.T) {
}
}
-// TODO: re-include TestMigrateOldConfigFromFile once a new API version is added after v1beta3.
-// see <link-to-commit-foo> of how this unit test function
-// looked before it was removed with the removal of v1beta2.
-// func TestMigrateOldConfigFromFile(t *testing.T) {
-// tests := []struct {
-// desc string
-// oldCfg string
-// expectedKinds []string
-// expectErr bool
-// }{
-// {
-// desc: "empty file produces empty result",
-// oldCfg: "",
-// expectErr: false,
-// },
-// {
-// desc: "bad config produces error",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectErr: true,
-// },
-// {
-// desc: "InitConfiguration only gets migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: InitConfiguration
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "ClusterConfiguration only gets migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: ClusterConfiguration
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "JoinConfiguration only gets migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: JoinConfiguration
-// discovery:
-// bootstrapToken:
-// token: abcdef.0123456789abcdef
-// apiServerEndpoint: kube-apiserver:6443
-// unsafeSkipCAVerification: true
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.JoinConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "Init + Cluster Configurations are migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: InitConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: ClusterConfiguration
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "Init + Join Configurations are migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: InitConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: JoinConfiguration
-// discovery:
-// bootstrapToken:
-// token: abcdef.0123456789abcdef
-// apiServerEndpoint: kube-apiserver:6443
-// unsafeSkipCAVerification: true
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// constants.JoinConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "Cluster + Join Configurations are migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: ClusterConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: JoinConfiguration
-// discovery:
-// bootstrapToken:
-// token: abcdef.0123456789abcdef
-// apiServerEndpoint: kube-apiserver:6443
-// unsafeSkipCAVerification: true
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// constants.JoinConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "Init + Cluster + Join Configurations are migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: InitConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: ClusterConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: JoinConfiguration
-// discovery:
-// bootstrapToken:
-// token: abcdef.0123456789abcdef
-// apiServerEndpoint: kube-apiserver:6443
-// unsafeSkipCAVerification: true
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// constants.JoinConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "component configs are not migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: InitConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: ClusterConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: JoinConfiguration
-// discovery:
-// bootstrapToken:
-// token: abcdef.0123456789abcdef
-// apiServerEndpoint: kube-apiserver:6443
-// unsafeSkipCAVerification: true
-// ---
-// apiVersion: kubeproxy.config.k8s.io/v1alpha1
-// kind: KubeProxyConfiguration
-// ---
-// apiVersion: kubelet.config.k8s.io/v1beta1
-// kind: KubeletConfiguration
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// constants.JoinConfigurationKind,
-// },
-// expectErr: false,
-// },
-// }
+// NOTE: do not delete this test once an older API is removed and there is only one API left.
+// Update the inline "gv" and "gvExperimental" variables to hold the GroupVersion string of
+// the API to be tested. If there are no experimental APIs, make "gvExperimental" point to
+// a non-experimental API.
+func TestMigrateOldConfig(t *testing.T) {
+ var (
+ gv = kubeadmapiv1old.SchemeGroupVersion.String()
+ gvExperimental = kubeadmapiv1.SchemeGroupVersion.String()
+ )
+ tests := []struct {
+ desc string
+ oldCfg string
+ expectedKinds []string
+ expectErr bool
+ allowExperimental bool
+ }{
+ {
+ desc: "empty file produces empty result",
+ oldCfg: "",
+ expectErr: false,
+ },
+ {
+ desc: "bad config produces error",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ `, gv)),
+ expectErr: true,
+ },
+ {
+ desc: "InitConfiguration only gets migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: InitConfiguration
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "ClusterConfiguration only gets migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: ClusterConfiguration
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "JoinConfiguration only gets migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: JoinConfiguration
+ discovery:
+ bootstrapToken:
+ token: abcdef.0123456789abcdef
+ apiServerEndpoint: kube-apiserver:6443
+ unsafeSkipCAVerification: true
+ `, gv)),
+ expectedKinds: []string{
+ constants.JoinConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "Init + Cluster Configurations are migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: InitConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: ClusterConfiguration
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "Init + Join Configurations are migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: InitConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: JoinConfiguration
+ discovery:
+ bootstrapToken:
+ token: abcdef.0123456789abcdef
+ apiServerEndpoint: kube-apiserver:6443
+ unsafeSkipCAVerification: true
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ constants.JoinConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "Cluster + Join Configurations are migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: ClusterConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: JoinConfiguration
+ discovery:
+ bootstrapToken:
+ token: abcdef.0123456789abcdef
+ apiServerEndpoint: kube-apiserver:6443
+ unsafeSkipCAVerification: true
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ constants.JoinConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "Init + Cluster + Join Configurations are migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: InitConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: ClusterConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: JoinConfiguration
+ discovery:
+ bootstrapToken:
+ token: abcdef.0123456789abcdef
+ apiServerEndpoint: kube-apiserver:6443
+ unsafeSkipCAVerification: true
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ constants.JoinConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "component configs are not migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: InitConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: ClusterConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: JoinConfiguration
+ discovery:
+ bootstrapToken:
+ token: abcdef.0123456789abcdef
+ apiServerEndpoint: kube-apiserver:6443
+ unsafeSkipCAVerification: true
+ ---
+ apiVersion: kubeproxy.config.k8s.io/v1alpha1
+ kind: KubeProxyConfiguration
+ ---
+ apiVersion: kubelet.config.k8s.io/v1beta1
+ kind: KubeletConfiguration
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ constants.JoinConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "ClusterConfiguration gets migrated from experimental API",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: ClusterConfiguration
+ `, gvExperimental)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ },
+ allowExperimental: true,
+ expectErr: false,
+ },
+ }
-// for _, test := range tests {
-// t.Run(test.desc, func(t *testing.T) {
-// b, err := MigrateOldConfig([]byte(test.oldCfg))
-// if test.expectErr {
-// if err == nil {
-// t.Fatalf("unexpected success:\\n%s", b)
-// }
-// } else {
-// if err != nil {
-// t.Fatalf("unexpected failure: %v", err)
-// }
-// gvks, err := kubeadmutil.GroupVersionKindsFromBytes(b)
-// if err != nil {
-// t.Fatalf("unexpected error returned by GroupVersionKindsFromBytes: %v", err)
-// }
-// if len(gvks) != len(test.expectedKinds) {
-// t.Fatalf("length mismatch between resulting gvks and expected kinds:\\n\\tlen(gvks)=%d\\n\\tlen(expectedKinds)=%d",
-// len(gvks), len(test.expectedKinds))
-// }
-// for _, expectedKind := range test.expectedKinds {
-// if !kubeadmutil.GroupVersionKindsHasKind(gvks, expectedKind) {
-// t.Fatalf("migration failed to produce config kind: %s", expectedKind)
-// }
-// }
-// }
-// })
-// }
-// }
+ for _, test := range tests {
+ t.Run(test.desc, func(t *testing.T) {
+ b, err := MigrateOldConfig([]byte(test.oldCfg), test.allowExperimental)
+ if test.expectErr {
+ if err == nil {
+ t.Fatalf("unexpected success:\\n%s", b)
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("unexpected failure: %v", err)
+ }
+ gvks, err := kubeadmutil.GroupVersionKindsFromBytes(b)
+ if err != nil {
+ t.Fatalf("unexpected error returned by GroupVersionKindsFromBytes: %v", err)
+ }
+ if len(gvks) != len(test.expectedKinds) {
+ t.Fatalf("length mismatch between resulting gvks and expected kinds:\\n\\tlen(gvks)=%d\\n\\tlen(expectedKinds)=%d",
+ len(gvks), len(test.expectedKinds))
+ }
+ for _, expectedKind := range test.expectedKinds {
+ if !kubeadmutil.GroupVersionKindsHasKind(gvks, expectedKind) {
+ t.Fatalf("migration failed to produce config kind: %s", expectedKind)
+ }
+ }
+ }
+ })
+ }
+}
func TestIsKubeadmPrereleaseVersion(t *testing.T) {
validVersionInfo := &apimachineryversion.Info{Major: "1", GitVersion: "v1.23.0-alpha.1"}
|