* Use wait.PollUntilContextTimeout instead of the deprecated wait.Poll for pkg/scheduler
* Use wait.PollUntilContextTimeout instead of the deprecated wait.Poll for test/integration/scheduler
* Use wait.PollUntilContextTimeout instead of the deprecated wait.Poll for test/e2e/scheduling
* Use wait.ConditionWithContextFunc for PodScheduled/PodIsGettingEvicted/PodScheduledIn/PodUnschedulable/PodSchedulingError
2023-08-18
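Before reading the diff, here is a minimal, runnable sketch of the migration pattern this PR applies, assuming only the public k8s.io/apimachinery/pkg/util/wait API. The isReady condition and its retry count are hypothetical, invented for illustration; the PR's real conditions (PodScheduled, PodIsGettingEvicted, and friends) live in test/integration/util and appear in the diff below.

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

// isReady is a hypothetical condition used only in this sketch. Returning a
// wait.ConditionWithContextFunc (rather than the context-free wait.ConditionFunc)
// is the signature change applied to the testutils helpers in the diff.
func isReady() wait.ConditionWithContextFunc {
	calls := 0
	return func(ctx context.Context) (bool, error) {
		calls++
		return calls >= 3, nil // pretend the resource becomes ready on the third poll
	}
}

func main() {
	ctx := context.Background()

	// Deprecated form, context-free and not cancellable:
	//   err := wait.Poll(100*time.Millisecond, 3*time.Second, func() (bool, error) { ... })
	//
	// Replacement: the added bool is 'immediate'. wait.Poll maps to false and
	// wait.PollImmediate maps to true, matching the call sites in the diff.
	err := wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 3*time.Second, false, isReady())
	if err != nil {
		// Timeouts now surface as context errors, so callers test with
		// wait.Interrupted(err) rather than comparing against the deprecated
		// wait.ErrWaitTimeout (see the scheduler_test.go hunk below).
		fmt.Println("polling interrupted:", err)
		return
	}
	fmt.Println("condition met")
}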
diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder.go b/pkg/scheduler/framework/plugins/volumebinding/binder.go
index b8afe554ca8..d035b16721b 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/binder.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/binder.go
@@ -523,7 +523,7 @@ func (b *volumeBinder) BindPodVolumes(ctx context.Context, assumedPod *v1.Pod, p
return err
}
- err = wait.Poll(time.Second, b.bindTimeout, func() (bool, error) {
+ err = wait.PollUntilContextTimeout(ctx, time.Second, b.bindTimeout, false, func(ctx context.Context) (bool, error) {
b, err := b.checkBindings(assumedPod, bindings, claimsToProvision)
return b, err
})
diff --git a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go
index 7c8661045b1..5d7fdca1a1b 100644
--- a/pkg/scheduler/framework/plugins/volumebinding/binder_test.go
+++ b/pkg/scheduler/framework/plugins/volumebinding/binder_test.go
@@ -349,7 +349,7 @@ func (env *testEnv) updateVolumes(ctx context.Context, pvs []*v1.PersistentVolum
}
pvs[i] = newPv
}
- return wait.Poll(100*time.Millisecond, 3*time.Second, func() (bool, error) {
+ return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 3*time.Second, false, func(ctx context.Context) (bool, error) {
for _, pv := range pvs {
obj, err := env.internalPVCache.GetAPIObj(pv.Name)
if obj == nil || err != nil {
@@ -375,7 +375,7 @@ func (env *testEnv) updateClaims(ctx context.Context, pvcs []*v1.PersistentVolum
}
pvcs[i] = newPvc
}
- return wait.Poll(100*time.Millisecond, 3*time.Second, func() (bool, error) {
+ return wait.PollUntilContextTimeout(ctx, 100*time.Millisecond, 3*time.Second, false, func(ctx context.Context) (bool, error) {
for _, pvc := range pvcs {
obj, err := env.internalPVCCache.GetAPIObj(getPVCName(pvc))
if obj == nil || err != nil {
diff --git a/test/e2e/scheduling/limit_range.go b/test/e2e/scheduling/limit_range.go
index 1321f10cb1c..946c9a757d0 100644
--- a/test/e2e/scheduling/limit_range.go
+++ b/test/e2e/scheduling/limit_range.go
@@ -177,7 +177,7 @@ var _ = SIGDescribe("LimitRange", func() {
framework.ExpectNoError(err)
ginkgo.By("Verifying LimitRange updating is effective")
- err = wait.Poll(time.Second*2, time.Second*20, func() (bool, error) {
+ err = wait.PollUntilContextTimeout(ctx, time.Second*2, time.Second*20, false, func(ctx context.Context) (bool, error) {
limitRange, err = f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).Get(ctx, limitRange.Name, metav1.GetOptions{})
framework.ExpectNoError(err)
return reflect.DeepEqual(limitRange.Spec.Limits[0].Min, newMin), nil
@@ -199,7 +199,7 @@ var _ = SIGDescribe("LimitRange", func() {
framework.ExpectNoError(err)
ginkgo.By("Verifying the LimitRange was deleted")
- err = wait.Poll(time.Second*5, e2eservice.RespondingTimeout, func() (bool, error) {
+ err = wait.PollUntilContextTimeout(ctx, time.Second*5, e2eservice.RespondingTimeout, false, func(ctx context.Context) (bool, error) {
limitRanges, err := f.ClientSet.CoreV1().LimitRanges(f.Namespace.Name).List(ctx, metav1.ListOptions{})
if err != nil {
diff --git a/test/e2e/scheduling/preemption.go b/test/e2e/scheduling/preemption.go
index 5fccfb52e91..6ae213dd16d 100644
--- a/test/e2e/scheduling/preemption.go
+++ b/test/e2e/scheduling/preemption.go
@@ -728,7 +728,7 @@ var _ = SIGDescribe("SchedulerPreemption [Serial]", func() {
// - if it's less than expected replicas, it denotes its pods are under-preempted
// "*2" means pods of ReplicaSet{1,2} are expected to be only preempted once.
expectedRSPods := []int32{1 * 2, 1 * 2, 1}
- err := wait.Poll(framework.Poll, framework.PollShortTimeout, func() (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, framework.Poll, framework.PollShortTimeout, false, func(ctx context.Context) (bool, error) {
for i := 0; i < len(podNamesSeen); i++ {
got := atomic.LoadInt32(&podNamesSeen[i])
if got < expectedRSPods[i] {
@@ -905,7 +905,7 @@ func createPod(ctx context.Context, f *framework.Framework, conf pausePodConfig)
// waitForPreemptingWithTimeout verifies if 'pod' is preempting within 'timeout', specifically it checks
// if the 'spec.NodeName' field of preemptor 'pod' has been set.
func waitForPreemptingWithTimeout(ctx context.Context, f *framework.Framework, pod *v1.Pod, timeout time.Duration) {
- err := wait.Poll(2*time.Second, timeout, func() (bool, error) {
+ err := wait.PollUntilContextTimeout(ctx, 2*time.Second, timeout, false, func(ctx context.Context) (bool, error) {
pod, err := f.ClientSet.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
diff --git a/test/integration/node/lifecycle_test.go b/test/integration/node/lifecycle_test.go
index 5346c29e01e..e9451d319c5 100644
--- a/test/integration/node/lifecycle_test.go
+++ b/test/integration/node/lifecycle_test.go
@@ -154,7 +154,7 @@ func TestEvictionForNoExecuteTaintAddedByUser(t *testing.T) {
t.Errorf("Failed to taint node in test %s <%s>, err: %v", name, nodes[nodeIndex].Name, err)
}
- err = wait.PollImmediate(time.Second, time.Second*20, testutils.PodIsGettingEvicted(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, time.Second*20, true, testutils.PodIsGettingEvicted(cs, testPod.Namespace, testPod.Name))
if err != nil {
t.Fatalf("Error %q in test %q when waiting for terminating pod: %q", err, name, klog.KObj(testPod))
}
diff --git a/test/integration/podgc/podgc_test.go b/test/integration/podgc/podgc_test.go
index 799f7f26273..236e350762d 100644
--- a/test/integration/podgc/podgc_test.go
+++ b/test/integration/podgc/podgc_test.go
@@ -148,7 +148,7 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
if err != nil {
t.Fatalf("Failed to delete node: %v, err: %v", pod.Spec.NodeName, err)
}
- err = wait.PollImmediate(time.Second, time.Second*15, testutils.PodIsGettingEvicted(cs, pod.Namespace, pod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, time.Second*15, true, testutils.PodIsGettingEvicted(cs, pod.Namespace, pod.Name))
if err != nil {
t.Fatalf("Error '%v' while waiting for the pod '%v' to be terminating", err, klog.KObj(pod))
}
@@ -261,7 +261,7 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) {
t.Fatalf("Error: '%v' while deleting pod: '%v'", err, klog.KObj(pod))
}
// wait until the pod is terminating
- err = wait.PollImmediate(time.Second, time.Second*15, testutils.PodIsGettingEvicted(cs, pod.Namespace, pod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, time.Second*15, true, testutils.PodIsGettingEvicted(cs, pod.Namespace, pod.Name))
if err != nil {
t.Fatalf("Error '%v' while waiting for the pod '%v' to be terminating", err, klog.KObj(pod))
}
diff --git a/test/integration/scheduler/extender/extender_test.go b/test/integration/scheduler/extender/extender_test.go
index 1ba4a5614cb..d34b8160acc 100644
--- a/test/integration/scheduler/extender/extender_test.go
+++ b/test/integration/scheduler/extender/extender_test.go
@@ -410,7 +410,8 @@ func DoTestPodScheduling(ns *v1.Namespace, t *testing.T, cs clientset.Interface)
t.Fatalf("Failed to create pod: %v", err)
}
- err = wait.Poll(time.Second, wait.ForeverTestTimeout, testutils.PodScheduled(cs, myPod.Namespace, myPod.Name))
+ err = wait.PollUntilContextTimeout(context.TODO(), time.Second, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, myPod.Namespace, myPod.Name))
if err != nil {
t.Fatalf("Failed to schedule pod: %v", err)
}
diff --git a/test/integration/scheduler/filters/filters_test.go b/test/integration/scheduler/filters/filters_test.go
index 07b08d704e9..6ca67b99b64 100644
--- a/test/integration/scheduler/filters/filters_test.go
+++ b/test/integration/scheduler/filters/filters_test.go
@@ -832,7 +832,8 @@ func TestInterPodAffinity(t *testing.T) {
if err != nil {
t.Fatalf("Error while creating pod: %v", err)
}
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
+ err = wait.PollUntilContextTimeout(ctx, pollInterval, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
if err != nil {
t.Errorf("Error while creating pod: %v", err)
}
@@ -849,9 +850,11 @@ func TestInterPodAffinity(t *testing.T) {
}
if test.fits {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(ctx, pollInterval, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
} else {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(ctx, pollInterval, wait.ForeverTestTimeout, false,
+ podUnschedulable(cs, testPod.Namespace, testPod.Name))
}
if err != nil {
t.Errorf("Error while trying to fit a pod: %v", err)
@@ -1016,7 +1019,8 @@ func TestInterPodAffinityWithNamespaceSelector(t *testing.T) {
if err != nil {
t.Fatalf("Error while creating pod: %v", err)
}
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
if err != nil {
t.Errorf("Error while creating pod: %v", err)
}
@@ -1033,9 +1037,11 @@ func TestInterPodAffinityWithNamespaceSelector(t *testing.T) {
}
if test.fits {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, testPod.Namespace, testPod.Name))
} else {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ podUnschedulable(cs, testPod.Namespace, testPod.Name))
}
if err != nil {
t.Errorf("Error while trying to fit a pod: %v", err)
@@ -1517,7 +1523,8 @@ func TestPodTopologySpreadFilter(t *testing.T) {
if err != nil {
t.Fatalf("Error while creating pod during test: %v", err)
}
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
if err != nil {
t.Errorf("Error while waiting for pod during test: %v", err)
}
@@ -1528,9 +1535,11 @@ func TestPodTopologySpreadFilter(t *testing.T) {
}
if tt.fits {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.candidateNodes))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.candidateNodes))
} else {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ podUnschedulable(cs, testPod.Namespace, testPod.Name))
}
if err != nil {
t.Errorf("Test Failed: %v", err)
diff --git a/test/integration/scheduler/plugins/plugins_test.go b/test/integration/scheduler/plugins/plugins_test.go
index 7082bb33400..fa6c181ab40 100644
--- a/test/integration/scheduler/plugins/plugins_test.go
+++ b/test/integration/scheduler/plugins/plugins_test.go
@@ -672,7 +672,8 @@ func TestPreFilterPlugin(t *testing.T) {
t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
}
} else if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but got: %v", err)
}
} else {
@@ -844,7 +845,8 @@ func TestPostFilterPlugin(t *testing.T) {
}
if tt.rejectFilter {
- if err = wait.Poll(10*time.Millisecond, 10*time.Second, testutils.PodUnschedulable(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 10*time.Second, false,
+ testutils.PodUnschedulable(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Didn't expect the pod to be scheduled.")
}
@@ -912,7 +914,8 @@ func TestScorePlugin(t *testing.T) {
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but got: %v", err)
}
} else {
@@ -1003,7 +1006,7 @@ func TestReservePluginReserve(t *testing.T) {
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second,
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Didn't expect the pod to be scheduled. error: %v", err)
}
@@ -1131,7 +1134,8 @@ func TestPrebindPlugin(t *testing.T) {
if err = testutils.WaitForPodToScheduleWithTimeout(testCtx.ClientSet, pod, 10*time.Second); err != nil {
t.Errorf("Expected the pod to be schedulable on retry, but got an error: %v", err)
}
- } else if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ } else if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
}
} else if test.reject {
@@ -1148,7 +1152,7 @@ func TestPrebindPlugin(t *testing.T) {
}
if test.unschedulablePod != nil {
- if err := wait.Poll(10*time.Millisecond, 15*time.Second, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 15*time.Second, false, func(ctx context.Context) (bool, error) {
// 2 means the unschedulable pod is expected to be retried at least twice.
// (one initial attempt plus the one moved by the preBind pod)
return filterPlugin.deepCopy().numFilterCalled >= 2*nodesNum, nil
@@ -1273,7 +1277,8 @@ func TestUnReserveReservePlugins(t *testing.T) {
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a reasons other than Unschedulable, but got: %v", err)
}
@@ -1509,7 +1514,8 @@ func TestUnReserveBindPlugins(t *testing.T) {
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a reasons other than Unschedulable, but got: %v", err)
}
@@ -1681,7 +1687,7 @@ func TestBindPlugin(t *testing.T) {
t.Errorf("Expected %s not to be called, was called %d times.", p2.Name(), p2.numBindCalled)
}
}
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, func() (done bool, err error) {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (done bool, err error) {
p := postBindPlugin.deepCopy()
return p.numPostBindCalled == 1, nil
}); err != nil {
@@ -1692,7 +1698,8 @@ func TestBindPlugin(t *testing.T) {
}
} else {
// bind plugin fails to bind the pod
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
}
p := postBindPlugin.deepCopy()
@@ -1762,7 +1769,8 @@ func TestPostBindPlugin(t *testing.T) {
}
if test.preBindFail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
}
if postBindPlugin.numPostBindCalled > 0 {
@@ -1858,7 +1866,8 @@ func TestPermitPlugin(t *testing.T) {
t.Errorf("Error while creating a test pod: %v", err)
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but didn't get it. error: %v", err)
}
} else {
@@ -1907,7 +1916,7 @@ func TestMultiplePermitPlugins(t *testing.T) {
var waitingPod framework.WaitingPod
// Wait until the test pod is actually waiting.
- wait.Poll(10*time.Millisecond, 30*time.Second, func() (bool, error) {
+ wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
waitingPod = perPlugin1.fh.GetWaitingPod(pod.UID)
return waitingPod != nil, nil
})
@@ -1959,14 +1968,14 @@ func TestPermitPluginsCancelled(t *testing.T) {
var waitingPod framework.WaitingPod
// Wait until the test pod is actually waiting.
- wait.Poll(10*time.Millisecond, 30*time.Second, func() (bool, error) {
+ wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
waitingPod = perPlugin1.fh.GetWaitingPod(pod.UID)
return waitingPod != nil, nil
})
perPlugin1.rejectAllPods()
// Wait some time for the permit plugins to be cancelled
- err = wait.Poll(10*time.Millisecond, 30*time.Second, func() (bool, error) {
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
p1 := perPlugin1.deepCopy()
p2 := perPlugin2.deepCopy()
return p1.cancelled && p2.cancelled, nil
@@ -2100,7 +2109,8 @@ func TestFilterPlugin(t *testing.T) {
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but got: %v", err)
}
if filterPlugin.numFilterCalled < 1 {
@@ -2156,7 +2166,8 @@ func TestPreScorePlugin(t *testing.T) {
}
if test.fail {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but got: %v", err)
}
} else {
@@ -2361,7 +2372,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
t.Fatalf("Error while creating the waiting pod: %v", err)
}
// Wait until the waiting-pod is actually waiting.
- if err := wait.Poll(10*time.Millisecond, 30*time.Second, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
w := false
permitPlugin.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { w = true })
return w, nil
@@ -2386,7 +2397,7 @@ func TestPreemptWithPermitPlugin(t *testing.T) {
}
if w := tt.waitingPod; w != nil {
- if err := wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(testCtx.Ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
w := false
permitPlugin.fh.IterateOverWaitingPods(func(wp framework.WaitingPod) { w = true })
return !w, nil
diff --git a/test/integration/scheduler/preemption/preemption_test.go b/test/integration/scheduler/preemption/preemption_test.go
index c57d83247ac..c13cbdd6b95 100644
--- a/test/integration/scheduler/preemption/preemption_test.go
+++ b/test/integration/scheduler/preemption/preemption_test.go
@@ -78,8 +78,8 @@ const filterPluginName = "filter-plugin"
var lowPriority, mediumPriority, highPriority = int32(100), int32(200), int32(300)
func waitForNominatedNodeNameWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
- if err := wait.Poll(100*time.Millisecond, timeout, func() (bool, error) {
- pod, err := cs.CoreV1().Pods(pod.Namespace).Get(context.TODO(), pod.Name, metav1.GetOptions{})
+ if err := wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, false, func(ctx context.Context) (bool, error) {
+ pod, err := cs.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -504,7 +504,8 @@ func TestPreemption(t *testing.T) {
// Wait for preemption of pods and make sure the other ones are not preempted.
for i, p := range pods {
if _, found := test.preemptedPodIndexes[i]; found {
- if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, wait.ForeverTestTimeout, false,
+ podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("Pod %v/%v is not getting evicted.", p.Namespace, p.Name)
}
pod, err := cs.CoreV1().Pods(p.Namespace).Get(testCtx.Ctx, p.Name, metav1.GetOptions{})
@@ -883,7 +884,7 @@ func TestPreemptionStarvation(t *testing.T) {
}
// Make sure that all pending pods are being marked unschedulable.
for _, p := range pendingPods {
- if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout,
+ if err := wait.PollUntilContextTimeout(testCtx.Ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false,
podUnschedulable(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("Pod %v/%v didn't get marked unschedulable: %v", p.Namespace, p.Name, err)
}
@@ -1214,8 +1215,8 @@ func TestNominatedNodeCleanUp(t *testing.T) {
}
// Verify if .status.nominatedNodeName is cleared.
- if err := wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
- pod, err := cs.CoreV1().Pods(ns).Get(context.TODO(), "medium", metav1.GetOptions{})
+ if err := wait.PollUntilContextTimeout(testCtx.Ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
+ pod, err := cs.CoreV1().Pods(ns).Get(ctx, "medium", metav1.GetOptions{})
if err != nil {
t.Errorf("Error getting the medium pod: %v", err)
}
@@ -1485,7 +1486,8 @@ func TestPDBInPreemption(t *testing.T) {
// Wait for preemption of pods and make sure the other ones are not preempted.
for i, p := range pods {
if _, found := test.preemptedPodIndexes[i]; found {
- if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, wait.ForeverTestTimeout, false,
+ podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("Test [%v]: Pod %v/%v is not getting evicted.", test.name, p.Namespace, p.Name)
}
} else {
@@ -1622,8 +1624,8 @@ func TestPreferNominatedNode(t *testing.T) {
if err != nil {
t.Errorf("Error while creating high priority pod: %v", err)
}
- err = wait.Poll(100*time.Millisecond, wait.ForeverTestTimeout, func() (bool, error) {
- preemptor, err = cs.CoreV1().Pods(test.pod.Namespace).Get(context.TODO(), test.pod.Name, metav1.GetOptions{})
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, 100*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
+ preemptor, err = cs.CoreV1().Pods(test.pod.Namespace).Get(ctx, test.pod.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Error getting the preemptor pod info: %v", err)
}
@@ -1975,7 +1977,8 @@ func TestReadWriteOncePodPreemption(t *testing.T) {
// Wait for preemption of pods and make sure the other ones are not preempted.
for i, p := range pods {
if _, found := test.preemptedPodIndexes[i]; found {
- if err = wait.Poll(time.Second, wait.ForeverTestTimeout, podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, time.Second, wait.ForeverTestTimeout, false,
+ podIsGettingEvicted(cs, p.Namespace, p.Name)); err != nil {
t.Errorf("Pod %v/%v is not getting evicted.", p.Namespace, p.Name)
}
} else {
diff --git a/test/integration/scheduler/queue_test.go b/test/integration/scheduler/queue_test.go
index 24603f487f5..7b68fcc8681 100644
--- a/test/integration/scheduler/queue_test.go
+++ b/test/integration/scheduler/queue_test.go
@@ -138,7 +138,7 @@ func TestSchedulingGates(t *testing.T) {
}
// Wait for the pods to be present in the scheduling queue.
- if err := wait.Poll(time.Millisecond*200, wait.ForeverTestTimeout, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(ctx, time.Millisecond*200, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
pendingPods, _ := testCtx.Scheduler.SchedulingQueue.PendingPods()
return len(pendingPods) == len(tt.pods), nil
}); err != nil {
@@ -215,7 +215,7 @@ func TestCoreResourceEnqueue(t *testing.T) {
}
// Wait for the three pods to be present in the scheduling queue.
- if err := wait.Poll(time.Millisecond*200, wait.ForeverTestTimeout, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(ctx, time.Millisecond*200, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
pendingPods, _ := testCtx.Scheduler.SchedulingQueue.PendingPods()
return len(pendingPods) == 3, nil
}); err != nil {
@@ -396,7 +396,7 @@ func TestCustomResourceEnqueue(t *testing.T) {
}
// Wait for the testing Pod to be present in the scheduling queue.
- if err := wait.Poll(time.Millisecond*200, wait.ForeverTestTimeout, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(ctx, time.Millisecond*200, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
pendingPods, _ := testCtx.Scheduler.SchedulingQueue.PendingPods()
return len(pendingPods) == 1, nil
}); err != nil {
@@ -489,14 +489,14 @@ func TestRequeueByBindFailure(t *testing.T) {
}
// first binding try should fail.
- err := wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, testutils.PodSchedulingError(cs, ns, "pod-1"))
+ err := wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, testutils.PodSchedulingError(cs, ns, "pod-1"))
if err != nil {
t.Fatalf("Expect pod-1 to be rejected by the bind plugin")
}
// The pod should be enqueued to activeQ/backoffQ without any event.
// The pod should be scheduled in the second binding try.
- err = wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, testutils.PodScheduled(cs, ns, "pod-1"))
+ err = wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, testutils.PodScheduled(cs, ns, "pod-1"))
if err != nil {
t.Fatalf("Expect pod-1 to be scheduled by the bind plugin in the second binding try")
}
@@ -610,20 +610,20 @@ func TestRequeueByPermitRejection(t *testing.T) {
})
// Wait for pod-2 to be scheduled.
- err := wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, func() (done bool, err error) {
+ err := wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (done bool, err error) {
fakePermit.frameworkHandler.IterateOverWaitingPods(func(wp framework.WaitingPod) {
if wp.GetPod().Name == "pod-2" {
wp.Allow(fakePermitPluginName)
}
})
- return testutils.PodScheduled(cs, ns, "pod-2")()
+ return testutils.PodScheduled(cs, ns, "pod-2")(ctx)
})
if err != nil {
t.Fatalf("Expect pod-2 to be scheduled")
}
- err = wait.Poll(200*time.Millisecond, wait.ForeverTestTimeout, func() (done bool, err error) {
+ err = wait.PollUntilContextTimeout(ctx, 200*time.Millisecond, wait.ForeverTestTimeout, false, func(ctx context.Context) (done bool, err error) {
pod1Found := false
fakePermit.frameworkHandler.IterateOverWaitingPods(func(wp framework.WaitingPod) {
if wp.GetPod().Name == "pod-1" {
diff --git a/test/integration/scheduler/rescheduling_test.go b/test/integration/scheduler/rescheduling_test.go
index 6bf011b3802..67781fff9c5 100644
--- a/test/integration/scheduler/rescheduling_test.go
+++ b/test/integration/scheduler/rescheduling_test.go
@@ -218,7 +218,8 @@ func TestReScheduling(t *testing.T) {
// The first time for scheduling, pod is error or unschedulable, controlled by wantFirstSchedulingError
if test.wantFirstSchedulingError {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but got: %v", err)
}
} else {
@@ -238,7 +239,8 @@ func TestReScheduling(t *testing.T) {
t.Errorf("Didn't expect the pod to be unschedulable. error: %v", err)
}
} else if test.wantError {
- if err = wait.Poll(10*time.Millisecond, 30*time.Second, testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
+ if err = wait.PollUntilContextTimeout(testCtx.Ctx, 10*time.Millisecond, 30*time.Second, false,
+ testutils.PodSchedulingError(testCtx.ClientSet, pod.Namespace, pod.Name)); err != nil {
t.Errorf("Expected a scheduling error, but got: %v", err)
}
} else {
diff --git a/test/integration/scheduler/scheduler_test.go b/test/integration/scheduler/scheduler_test.go
index 64e714ea71a..bb2d3851aa4 100644
--- a/test/integration/scheduler/scheduler_test.go
+++ b/test/integration/scheduler/scheduler_test.go
@@ -144,7 +144,7 @@ func TestUnschedulableNodes(t *testing.T) {
if err == nil {
t.Errorf("Test %d: Pod scheduled successfully on unschedulable nodes", i)
}
- if err != wait.ErrWaitTimeout {
+ if !wait.Interrupted(err) {
t.Errorf("Test %d: failed while trying to confirm the pod does not get scheduled on the node: %v", i, err)
} else {
t.Logf("Test %d: Pod did not get scheduled on an unschedulable node", i)
@@ -321,7 +321,7 @@ func TestMultipleSchedulingProfiles(t *testing.T) {
}
gotProfiles := make(map[string]string)
- if err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
+ if err := wait.PollUntilContextTimeout(testCtx.Ctx, 100*time.Millisecond, 30*time.Second, false, func(ctx context.Context) (bool, error) {
var ev watch.Event
select {
case ev = <-evs.ResultChan():
diff --git a/test/integration/scheduler/scoring/priorities_test.go b/test/integration/scheduler/scoring/priorities_test.go
index ceebf53e0a7..6101bb0f5fa 100644
--- a/test/integration/scheduler/scoring/priorities_test.go
+++ b/test/integration/scheduler/scoring/priorities_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package scoring
import (
+ "context"
"fmt"
"strings"
"testing"
@@ -628,7 +629,8 @@ func TestPodTopologySpreadScoring(t *testing.T) {
if err != nil {
t.Fatalf("Test Failed: error while creating pod during test: %v", err)
}
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ testutils.PodScheduled(cs, createdPod.Namespace, createdPod.Name))
if err != nil {
t.Errorf("Test Failed: error while waiting for pod during test: %v", err)
}
@@ -640,9 +642,11 @@ func TestPodTopologySpreadScoring(t *testing.T) {
}
if tt.fits {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.want))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ podScheduledIn(cs, testPod.Namespace, testPod.Name, tt.want))
} else {
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, podUnschedulable(cs, testPod.Namespace, testPod.Name))
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false,
+ podUnschedulable(cs, testPod.Namespace, testPod.Name))
}
if err != nil {
t.Errorf("Test Failed: %v", err)
@@ -706,8 +710,8 @@ func TestDefaultPodTopologySpreadScoring(t *testing.T) {
}
var pods []v1.Pod
// Wait for all Pods scheduled.
- err = wait.Poll(pollInterval, wait.ForeverTestTimeout, func() (bool, error) {
- podList, err := cs.CoreV1().Pods(ns).List(testCtx.Ctx, metav1.ListOptions{})
+ err = wait.PollUntilContextTimeout(testCtx.Ctx, pollInterval, wait.ForeverTestTimeout, false, func(ctx context.Context) (bool, error) {
+ podList, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
if err != nil {
t.Fatalf("Cannot list pods to verify scheduling: %v", err)
}
diff --git a/test/integration/util/util.go b/test/integration/util/util.go
index 8d70e0006be..63ffa12f6ed 100644
--- a/test/integration/util/util.go
+++ b/test/integration/util/util.go
@@ -579,7 +579,7 @@ func InitTestSchedulerWithOptions(
// WaitForPodToScheduleWithTimeout waits for a pod to get scheduled and returns
// an error if it does not scheduled within the given timeout.
func WaitForPodToScheduleWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
- return wait.Poll(100*time.Millisecond, timeout, PodScheduled(cs, pod.Namespace, pod.Name))
+ return wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, false, PodScheduled(cs, pod.Namespace, pod.Name))
}
// WaitForPodToSchedule waits for a pod to get scheduled and returns an error if
@@ -589,9 +589,9 @@ func WaitForPodToSchedule(cs clientset.Interface, pod *v1.Pod) error {
}
// PodScheduled checks if the pod has been scheduled
-func PodScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
- return func() (bool, error) {
- pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodScheduled(c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc {
+ return func(ctx context.Context) (bool, error) {
+ pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
@@ -899,9 +899,9 @@ func RunPodWithContainers(cs clientset.Interface, pod *v1.Pod) (*v1.Pod, error)
}
// PodIsGettingEvicted returns true if the pod's deletion timestamp is set.
-func PodIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
- return func() (bool, error) {
- pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc {
+ return func(ctx context.Context) (bool, error) {
+ pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
return false, err
}
@@ -913,9 +913,9 @@ func PodIsGettingEvicted(c clientset.Interface, podNamespace, podName string) wa
}
// PodScheduledIn returns true if a given pod is placed onto one of the expected nodes.
-func PodScheduledIn(c clientset.Interface, podNamespace, podName string, nodeNames []string) wait.ConditionFunc {
- return func() (bool, error) {
- pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodScheduledIn(c clientset.Interface, podNamespace, podName string, nodeNames []string) wait.ConditionWithContextFunc {
+ return func(ctx context.Context) (bool, error) {
+ pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
@@ -934,9 +934,9 @@ func PodScheduledIn(c clientset.Interface, podNamespace, podName string, nodeNam
// PodUnschedulable returns a condition function that returns true if the given pod
// gets unschedulable status of reason 'Unschedulable'.
-func PodUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
- return func() (bool, error) {
- pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodUnschedulable(c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc {
+ return func(ctx context.Context) (bool, error) {
+ pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
@@ -950,9 +950,9 @@ func PodUnschedulable(c clientset.Interface, podNamespace, podName string) wait.
// PodSchedulingError returns a condition function that returns true if the given pod
// gets unschedulable status for reasons other than "Unschedulable". The scheduler
// records such reasons in case of error.
-func PodSchedulingError(c clientset.Interface, podNamespace, podName string) wait.ConditionFunc {
- return func() (bool, error) {
- pod, err := c.CoreV1().Pods(podNamespace).Get(context.TODO(), podName, metav1.GetOptions{})
+func PodSchedulingError(c clientset.Interface, podNamespace, podName string) wait.ConditionWithContextFunc {
+ return func(ctx context.Context) (bool, error) {
+ pod, err := c.CoreV1().Pods(podNamespace).Get(ctx, podName, metav1.GetOptions{})
if err != nil {
// This could be a connection error so we want to retry.
return false, nil
@@ -981,7 +981,7 @@ func PodSchedulingGated(c clientset.Interface, podNamespace, podName string) wai
// WaitForPodUnschedulableWithTimeout waits for a pod to fail scheduling and returns
// an error if it does not become unschedulable within the given timeout.
func WaitForPodUnschedulableWithTimeout(cs clientset.Interface, pod *v1.Pod, timeout time.Duration) error {
- return wait.Poll(100*time.Millisecond, timeout, PodUnschedulable(cs, pod.Namespace, pod.Name))
+ return wait.PollUntilContextTimeout(context.TODO(), 100*time.Millisecond, timeout, false, PodUnschedulable(cs, pod.Namespace, pod.Name))
}
// WaitForPodUnschedulable waits for a pod to fail scheduling and returns
cleanup: Update deprecated FromInt to FromInt32 (#119858)
* Redo commit
* Apply suggestions from liggitt
* Update the Parse function based on review suggestions
2023-08-18
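As above, a minimal sketch of the FromInt-to-FromInt32 pattern before the diff, assuming only the public k8s.io/apimachinery/pkg/util/intstr API; the port values are made up for illustration.

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	// intstr.FromInt is deprecated because it silently narrows int to int32;
	// FromInt32 makes the width part of the signature:
	port := intstr.FromInt32(8080)

	// Call sites that still hold a plain int convert explicitly, so any
	// narrowing is visible where it happens -- the pattern used throughout
	// the test diffs below:
	targetPort := 9090
	tp := intstr.FromInt32(int32(targetPort))

	// Parse follows suit: strconv.ParseInt(val, 10, 32) rejects values that
	// do not fit in int32, and those fall back to the string form instead of
	// overflowing the way strconv.Atoi could on 32-bit platforms.
	for _, v := range []string{"80", "4294967296", "https"} {
		fmt.Printf("%q -> %s\n", v, intstr.Parse(v).String())
	}

	fmt.Println(port.String(), tp.String())
}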
diff --git a/cmd/kubeadm/app/phases/controlplane/manifests.go b/cmd/kubeadm/app/phases/controlplane/manifests.go
index 1c57110171f..42725d8dab8 100644
--- a/cmd/kubeadm/app/phases/controlplane/manifests.go
+++ b/cmd/kubeadm/app/phases/controlplane/manifests.go
@@ -63,9 +63,9 @@ func GetStaticPodSpecs(cfg *kubeadmapi.ClusterConfiguration, endpoint *kubeadmap
ImagePullPolicy: v1.PullIfNotPresent,
Command: getAPIServerCommand(cfg, endpoint),
VolumeMounts: staticpodutil.VolumeMountMapToSlice(mounts.GetVolumeMounts(kubeadmconstants.KubeAPIServer)),
- LivenessProbe: staticpodutil.LivenessProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/livez", int(endpoint.BindPort), v1.URISchemeHTTPS),
- ReadinessProbe: staticpodutil.ReadinessProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/readyz", int(endpoint.BindPort), v1.URISchemeHTTPS),
- StartupProbe: staticpodutil.StartupProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/livez", int(endpoint.BindPort), v1.URISchemeHTTPS, cfg.APIServer.TimeoutForControlPlane),
+ LivenessProbe: staticpodutil.LivenessProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/livez", endpoint.BindPort, v1.URISchemeHTTPS),
+ ReadinessProbe: staticpodutil.ReadinessProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/readyz", endpoint.BindPort, v1.URISchemeHTTPS),
+ StartupProbe: staticpodutil.StartupProbe(staticpodutil.GetAPIServerProbeAddress(endpoint), "/livez", endpoint.BindPort, v1.URISchemeHTTPS, cfg.APIServer.TimeoutForControlPlane),
Resources: staticpodutil.ComponentResources("250m"),
Env: kubeadmutil.MergeEnv(proxyEnvs, cfg.APIServer.ExtraEnvs),
}, mounts.GetVolumes(kubeadmconstants.KubeAPIServer),
diff --git a/cmd/kubeadm/app/util/staticpod/utils.go b/cmd/kubeadm/app/util/staticpod/utils.go
index 4f74e7e84ed..cab4feb16bf 100644
--- a/cmd/kubeadm/app/util/staticpod/utils.go
+++ b/cmd/kubeadm/app/util/staticpod/utils.go
@@ -242,20 +242,20 @@ func ReadStaticPodFromDisk(manifestPath string) (*v1.Pod, error) {
}
// LivenessProbe creates a Probe object with a HTTPGet handler
-func LivenessProbe(host, path string, port int, scheme v1.URIScheme) *v1.Probe {
+func LivenessProbe(host, path string, port int32, scheme v1.URIScheme) *v1.Probe {
// sets initialDelaySeconds same as periodSeconds to skip one period before running a check
return createHTTPProbe(host, path, port, scheme, 10, 15, 8, 10)
}
// ReadinessProbe creates a Probe object with a HTTPGet handler
-func ReadinessProbe(host, path string, port int, scheme v1.URIScheme) *v1.Probe {
+func ReadinessProbe(host, path string, port int32, scheme v1.URIScheme) *v1.Probe {
// sets initialDelaySeconds as '0' because we don't want to delay user infrastructure checks
// looking for "ready" status on kubeadm static Pods
return createHTTPProbe(host, path, port, scheme, 0, 15, 3, 1)
}
// StartupProbe creates a Probe object with a HTTPGet handler
-func StartupProbe(host, path string, port int, scheme v1.URIScheme, timeoutForControlPlane *metav1.Duration) *v1.Probe {
+func StartupProbe(host, path string, port int32, scheme v1.URIScheme, timeoutForControlPlane *metav1.Duration) *v1.Probe {
periodSeconds, timeoutForControlPlaneSeconds := int32(10), kubeadmconstants.DefaultControlPlaneTimeout.Seconds()
if timeoutForControlPlane != nil {
timeoutForControlPlaneSeconds = timeoutForControlPlane.Seconds()
@@ -267,13 +267,13 @@ func StartupProbe(host, path string, port int, scheme v1.URIScheme, timeoutForCo
return createHTTPProbe(host, path, port, scheme, periodSeconds, 15, failureThreshold, periodSeconds)
}
-func createHTTPProbe(host, path string, port int, scheme v1.URIScheme, initialDelaySeconds, timeoutSeconds, failureThreshold, periodSeconds int32) *v1.Probe {
+func createHTTPProbe(host, path string, port int32, scheme v1.URIScheme, initialDelaySeconds, timeoutSeconds, failureThreshold, periodSeconds int32) *v1.Probe {
return &v1.Probe{
ProbeHandler: v1.ProbeHandler{
HTTPGet: &v1.HTTPGetAction{
Host: host,
Path: path,
- Port: intstr.FromInt(port),
+ Port: intstr.FromInt32(port),
Scheme: scheme,
},
},
@@ -312,7 +312,7 @@ func GetSchedulerProbeAddress(cfg *kubeadmapi.ClusterConfiguration) string {
// GetEtcdProbeEndpoint takes a kubeadm Etcd configuration object and attempts to parse
// the first URL in the listen-metrics-urls argument, returning an etcd probe hostname,
// port and scheme
-func GetEtcdProbeEndpoint(cfg *kubeadmapi.Etcd, isIPv6 bool) (string, int, v1.URIScheme) {
+func GetEtcdProbeEndpoint(cfg *kubeadmapi.Etcd, isIPv6 bool) (string, int32, v1.URIScheme) {
localhost := "127.0.0.1"
if isIPv6 {
localhost = "::1"
@@ -346,7 +346,7 @@ func GetEtcdProbeEndpoint(cfg *kubeadmapi.Etcd, isIPv6 bool) (string, int, v1.UR
port = p
}
}
- return hostname, port, scheme
+ return hostname, int32(port), scheme
}
return localhost, kubeadmconstants.EtcdMetricsPort, v1.URISchemeHTTP
}
diff --git a/cmd/kubeadm/app/util/staticpod/utils_test.go b/cmd/kubeadm/app/util/staticpod/utils_test.go
index 8f7b2461cdc..bcea14876d1 100644
--- a/cmd/kubeadm/app/util/staticpod/utils_test.go
+++ b/cmd/kubeadm/app/util/staticpod/utils_test.go
@@ -244,7 +244,7 @@ func TestGetEtcdProbeEndpoint(t *testing.T) {
cfg *kubeadmapi.Etcd
isIPv6 bool
expectedHostname string
- expectedPort int
+ expectedPort int32
expectedScheme v1.URIScheme
}{
{
diff --git a/pkg/apis/apps/v1/defaults_test.go b/pkg/apis/apps/v1/defaults_test.go
index 9028a3bef54..d5683252b2c 100644
--- a/pkg/apis/apps/v1/defaults_test.go
+++ b/pkg/apis/apps/v1/defaults_test.go
@@ -176,7 +176,7 @@ func TestSetDefaultDaemonSetSpec(t *testing.T) {
}
func getMaxUnavailable(maxUnavailable int) *intstr.IntOrString {
- maxUnavailableIntOrStr := intstr.FromInt(maxUnavailable)
+ maxUnavailableIntOrStr := intstr.FromInt32(int32(maxUnavailable))
return &maxUnavailableIntOrStr
}
diff --git a/pkg/apis/apps/v1beta1/defaults_test.go b/pkg/apis/apps/v1beta1/defaults_test.go
index 6c6e24369ac..086b652174c 100644
--- a/pkg/apis/apps/v1beta1/defaults_test.go
+++ b/pkg/apis/apps/v1beta1/defaults_test.go
@@ -532,6 +532,6 @@ func getPartition(partition int32) *int32 {
}
func getMaxUnavailable(maxUnavailable int) *intstr.IntOrString {
- maxUnavailableIntOrStr := intstr.FromInt(maxUnavailable)
+ maxUnavailableIntOrStr := intstr.FromInt32(int32(maxUnavailable))
return &maxUnavailableIntOrStr
}
diff --git a/pkg/apis/apps/v1beta2/defaults_test.go b/pkg/apis/apps/v1beta2/defaults_test.go
index 11a24389c07..acf05c15466 100644
--- a/pkg/apis/apps/v1beta2/defaults_test.go
+++ b/pkg/apis/apps/v1beta2/defaults_test.go
@@ -176,7 +176,7 @@ func TestSetDefaultDaemonSetSpec(t *testing.T) {
}
func getMaxUnavailable(maxUnavailable int) *intstr.IntOrString {
- maxUnavailableIntOrStr := intstr.FromInt(maxUnavailable)
+ maxUnavailableIntOrStr := intstr.FromInt32(int32(maxUnavailable))
return &maxUnavailableIntOrStr
}
diff --git a/pkg/apis/core/validation/validation_test.go b/pkg/apis/core/validation/validation_test.go
index b19ac05f14b..b40e8ad54df 100644
--- a/pkg/apis/core/validation/validation_test.go
+++ b/pkg/apis/core/validation/validation_test.go
@@ -8197,7 +8197,7 @@ func TestValidateInitContainers(t *testing.T) {
StartupProbe: &core.Probe{
ProbeHandler: core.ProbeHandler{
TCPSocket: &core.TCPSocketAction{
- Port: intstr.FromInt(80),
+ Port: intstr.FromInt32(80),
},
},
SuccessThreshold: 1,
@@ -8413,7 +8413,7 @@ func TestValidateInitContainers(t *testing.T) {
RestartPolicy: &containerRestartPolicyAlways,
StartupProbe: &core.Probe{
ProbeHandler: core.ProbeHandler{
- TCPSocket: &core.TCPSocketAction{Port: intstr.FromInt(80)},
+ TCPSocket: &core.TCPSocketAction{Port: intstr.FromInt32(80)},
},
SuccessThreshold: 2,
},
diff --git a/pkg/controller/daemon/update_test.go b/pkg/controller/daemon/update_test.go
index 74c74fe9b14..86f9ae7fb5c 100644
--- a/pkg/controller/daemon/update_test.go
+++ b/pkg/controller/daemon/update_test.go
@@ -48,7 +48,7 @@ func TestDaemonSetUpdatesPods(t *testing.T) {
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
- intStr := intstr.FromInt(maxUnavailable)
+ intStr := intstr.FromInt32(int32(maxUnavailable))
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
manager.dsStore.Update(ds)
@@ -90,7 +90,7 @@ func TestDaemonSetUpdatesPodsWithMaxSurge(t *testing.T) {
// surge is thhe controlling amount
maxSurge := 2
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
- ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(maxSurge))
+ ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(int32(maxSurge)))
manager.dsStore.Update(ds)
clearExpectations(t, manager, ds, podControl)
@@ -135,7 +135,7 @@ func TestDaemonSetUpdatesWhenNewPosIsNotReady(t *testing.T) {
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
- intStr := intstr.FromInt(maxUnavailable)
+ intStr := intstr.FromInt32(int32(maxUnavailable))
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
err = manager.dsStore.Update(ds)
if err != nil {
@@ -171,7 +171,7 @@ func TestDaemonSetUpdatesAllOldPodsNotReady(t *testing.T) {
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
- intStr := intstr.FromInt(maxUnavailable)
+ intStr := intstr.FromInt32(int32(maxUnavailable))
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
err = manager.dsStore.Update(ds)
if err != nil {
@@ -203,7 +203,7 @@ func TestDaemonSetUpdatesAllOldPodsNotReadyMaxSurge(t *testing.T) {
maxSurge := 3
ds.Spec.Template.Spec.Containers[0].Image = "foo2/bar2"
- ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt(maxSurge))
+ ds.Spec.UpdateStrategy = newUpdateSurge(intstr.FromInt32(int32(maxSurge)))
manager.dsStore.Update(ds)
// all old pods are unavailable so should be surged
@@ -347,7 +347,7 @@ func TestDaemonSetUpdatesNoTemplateChanged(t *testing.T) {
expectSyncDaemonSets(t, manager, ds, podControl, 5, 0, 0)
ds.Spec.UpdateStrategy.Type = apps.RollingUpdateDaemonSetStrategyType
- intStr := intstr.FromInt(maxUnavailable)
+ intStr := intstr.FromInt32(int32(maxUnavailable))
ds.Spec.UpdateStrategy.RollingUpdate = &apps.RollingUpdateDaemonSet{MaxUnavailable: &intStr}
manager.dsStore.Update(ds)
diff --git a/pkg/controller/deployment/sync_test.go b/pkg/controller/deployment/sync_test.go
index f5f2f899d3f..b7605257bd9 100644
--- a/pkg/controller/deployment/sync_test.go
+++ b/pkg/controller/deployment/sync_test.go
@@ -35,7 +35,7 @@ import (
)
func intOrStrP(val int) *intstr.IntOrString {
- intOrStr := intstr.FromInt(val)
+ intOrStr := intstr.FromInt32(int32(val))
return &intOrStr
}
diff --git a/pkg/controller/deployment/util/deployment_util_test.go b/pkg/controller/deployment/util/deployment_util_test.go
index 42c7bbf4377..c0f2f33a96e 100644
--- a/pkg/controller/deployment/util/deployment_util_test.go
+++ b/pkg/controller/deployment/util/deployment_util_test.go
@@ -516,11 +516,11 @@ func TestNewRSNewReplicas(t *testing.T) {
newDeployment.Spec.Strategy = apps.DeploymentStrategy{Type: test.strategyType}
newDeployment.Spec.Strategy.RollingUpdate = &apps.RollingUpdateDeployment{
MaxUnavailable: func(i int) *intstr.IntOrString {
- x := intstr.FromInt(i)
+ x := intstr.FromInt32(int32(i))
return &x
}(1),
MaxSurge: func(i int) *intstr.IntOrString {
- x := intstr.FromInt(i)
+ x := intstr.FromInt32(int32(i))
return &x
}(test.maxSurge),
}
@@ -705,8 +705,8 @@ func TestDeploymentComplete(t *testing.T) {
Replicas: &desired,
Strategy: apps.DeploymentStrategy{
RollingUpdate: &apps.RollingUpdateDeployment{
- MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxUnavailable)),
- MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(maxSurge)),
+ MaxUnavailable: func(i int) *intstr.IntOrString { x := intstr.FromInt32(int32(i)); return &x }(int(maxUnavailable)),
+ MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt32(int32(i)); return &x }(int(maxSurge)),
},
Type: apps.RollingUpdateDeploymentStrategyType,
},
@@ -960,7 +960,7 @@ func TestMaxUnavailable(t *testing.T) {
Replicas: func(i int32) *int32 { return &i }(replicas),
Strategy: apps.DeploymentStrategy{
RollingUpdate: &apps.RollingUpdateDeployment{
- MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(1)),
+ MaxSurge: func(i int) *intstr.IntOrString { x := intstr.FromInt32(int32(i)); return &x }(int(1)),
MaxUnavailable: &maxUnavailable,
},
Type: apps.RollingUpdateDeploymentStrategyType,
@@ -1255,7 +1255,7 @@ func TestGetDeploymentsForReplicaSet(t *testing.T) {
}
func TestMinAvailable(t *testing.T) {
- maxSurge := func(i int) *intstr.IntOrString { x := intstr.FromInt(i); return &x }(int(1))
+ maxSurge := func(i int) *intstr.IntOrString { x := intstr.FromInt32(int32(i)); return &x }(int(1))
deployment := func(replicas int32, maxUnavailable intstr.IntOrString) *apps.Deployment {
return &apps.Deployment{
Spec: apps.DeploymentSpec{
diff --git a/pkg/controlplane/controller/kubernetesservice/controller.go b/pkg/controlplane/controller/kubernetesservice/controller.go
index bfee9fa8555..4966eb9738b 100644
--- a/pkg/controlplane/controller/kubernetesservice/controller.go
+++ b/pkg/controlplane/controller/kubernetesservice/controller.go
@@ -184,7 +184,7 @@ func createPortAndServiceSpec(servicePort int, targetServicePort int, nodePort i
Protocol: corev1.ProtocolTCP,
Port: int32(servicePort),
Name: servicePortName,
- TargetPort: intstr.FromInt(targetServicePort),
+ TargetPort: intstr.FromInt32(int32(targetServicePort)),
}}
serviceType := corev1.ServiceTypeClusterIP
if nodePort > 0 {
diff --git a/pkg/kubelet/lifecycle/handlers_test.go b/pkg/kubelet/lifecycle/handlers_test.go
index 0e6bc0459be..a6d095add38 100644
--- a/pkg/kubelet/lifecycle/handlers_test.go
+++ b/pkg/kubelet/lifecycle/handlers_test.go
@@ -45,7 +45,7 @@ import (
func TestResolvePortInt(t *testing.T) {
expected := 80
- port, err := resolvePort(intstr.FromInt(expected), &v1.Container{})
+ port, err := resolvePort(intstr.FromInt32(int32(expected)), &v1.Container{})
if port != expected {
t.Errorf("expected: %d, saw: %d", expected, port)
}
diff --git a/pkg/kubelet/prober/scale_test.go b/pkg/kubelet/prober/scale_test.go
index 6de9687e183..0b8b003d6f3 100644
--- a/pkg/kubelet/prober/scale_test.go
+++ b/pkg/kubelet/prober/scale_test.go
@@ -257,14 +257,14 @@ func (f *fakePod) probeHandler() v1.ProbeHandler {
handler = v1.ProbeHandler{
HTTPGet: &v1.HTTPGetAction{
Host: "127.0.0.1",
- Port: intstr.FromInt(port),
+ Port: intstr.FromInt32(int32(port)),
},
}
} else {
handler = v1.ProbeHandler{
TCPSocket: &v1.TCPSocketAction{
Host: "127.0.0.1",
- Port: intstr.FromInt(port),
+ Port: intstr.FromInt32(int32(port)),
},
}
}
diff --git a/pkg/proxy/iptables/number_generated_rules_test.go b/pkg/proxy/iptables/number_generated_rules_test.go
index b4cc25ce597..4d75bdee878 100644
--- a/pkg/proxy/iptables/number_generated_rules_test.go
+++ b/pkg/proxy/iptables/number_generated_rules_test.go
@@ -414,7 +414,7 @@ func generateServiceEndpoints(nServices, nEndpoints int, epsFunc func(eps *disco
Name: fmt.Sprintf("%d", epPort),
Protocol: v1.ProtocolTCP,
Port: int32(basePort + i),
- TargetPort: intstr.FromInt(epPort),
+ TargetPort: intstr.FromInt32(int32(epPort)),
},
}
diff --git a/pkg/proxy/iptables/proxier_test.go b/pkg/proxy/iptables/proxier_test.go
index 831fb1b6a30..7da8ac5af0f 100644
--- a/pkg/proxy/iptables/proxier_test.go
+++ b/pkg/proxy/iptables/proxier_test.go
@@ -2623,7 +2623,7 @@ func TestExternalIPsReject(t *testing.T) {
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(svcPort),
+ TargetPort: intstr.FromInt32(int32(svcPort)),
}}
}),
)
@@ -2698,7 +2698,7 @@ func TestOnlyLocalExternalIPs(t *testing.T) {
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(svcPort),
+ TargetPort: intstr.FromInt32(int32(svcPort)),
}}
}),
)
@@ -2810,7 +2810,7 @@ func TestNonLocalExternalIPs(t *testing.T) {
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(svcPort),
+ TargetPort: intstr.FromInt32(int32(svcPort)),
}}
}),
)
@@ -3932,7 +3932,7 @@ func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port
Protocol: protocol,
Port: port,
NodePort: nodeport,
- TargetPort: intstr.FromInt(targetPort),
+ TargetPort: intstr.FromInt32(int32(targetPort)),
}
return append(array, svcPort)
}
diff --git a/pkg/proxy/ipvs/proxier_test.go b/pkg/proxy/ipvs/proxier_test.go
index 22a1b84e638..5fa41441885 100644
--- a/pkg/proxy/ipvs/proxier_test.go
+++ b/pkg/proxy/ipvs/proxier_test.go
@@ -1743,7 +1743,7 @@ func TestExternalIPsNoEndpoint(t *testing.T) {
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(svcPort),
+ TargetPort: intstr.FromInt32(int32(svcPort)),
}}
}),
)
@@ -1795,7 +1795,7 @@ func TestExternalIPs(t *testing.T) {
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(svcPort),
+ TargetPort: intstr.FromInt32(int32(svcPort)),
}}
}),
)
@@ -1866,7 +1866,7 @@ func TestOnlyLocalExternalIPs(t *testing.T) {
Name: svcPortName.Port,
Port: int32(svcPort),
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(svcPort),
+ TargetPort: intstr.FromInt32(int32(svcPort)),
}}
svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyLocal
}),
@@ -2476,7 +2476,7 @@ func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port
Protocol: protocol,
Port: port,
NodePort: nodeport,
- TargetPort: intstr.FromInt(targetPort),
+ TargetPort: intstr.FromInt32(int32(targetPort)),
}
return append(array, svcPort)
}
diff --git a/pkg/proxy/service_test.go b/pkg/proxy/service_test.go
index a54d2d0876b..32c5541d501 100644
--- a/pkg/proxy/service_test.go
+++ b/pkg/proxy/service_test.go
@@ -67,7 +67,7 @@ func addTestPort(array []v1.ServicePort, name string, protocol v1.Protocol, port
Protocol: protocol,
Port: port,
NodePort: nodeport,
- TargetPort: intstr.FromInt(targetPort),
+ TargetPort: intstr.FromInt32(int32(targetPort)),
}
return append(array, svcPort)
}
diff --git a/pkg/registry/apps/statefulset/strategy_test.go b/pkg/registry/apps/statefulset/strategy_test.go
index e46cb62fb6e..fabd90fa896 100644
--- a/pkg/registry/apps/statefulset/strategy_test.go
+++ b/pkg/registry/apps/statefulset/strategy_test.go
@@ -308,7 +308,7 @@ func generateStatefulSetWithMinReadySeconds(minReadySeconds int32) *apps.Statefu
func makeStatefulSetWithMaxUnavailable(maxUnavailable *int) *apps.StatefulSet {
rollingUpdate := apps.RollingUpdateStatefulSetStrategy{}
if maxUnavailable != nil {
- maxUnavailableIntStr := intstr.FromInt(*maxUnavailable)
+ maxUnavailableIntStr := intstr.FromInt32(int32(*maxUnavailable))
rollingUpdate = apps.RollingUpdateStatefulSetStrategy{
MaxUnavailable: &maxUnavailableIntStr,
}
diff --git a/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go b/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go
index 0ea88156bef..f358c794d10 100644
--- a/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go
+++ b/staging/src/k8s.io/apimachinery/pkg/util/intstr/intstr.go
@@ -72,14 +72,14 @@ func FromString(val string) IntOrString {
return IntOrString{Type: String, StrVal: val}
}
-// Parse the given string and try to convert it to an integer before
+// Parse the given string and try to convert it to an int32 integer before
// setting it as a string value.
func Parse(val string) IntOrString {
- i, err := strconv.Atoi(val)
+ i, err := strconv.ParseInt(val, 10, 32)
if err != nil {
return FromString(val)
}
- return FromInt(i)
+ return FromInt32(int32(i))
}
// UnmarshalJSON implements the json.Unmarshaller interface.
diff --git a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go
index b37b023ab99..62909f88230 100644
--- a/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go
+++ b/staging/src/k8s.io/cloud-provider/controllers/service/controller_test.go
@@ -100,7 +100,7 @@ func tweakAddLBIngress(ip string) serviceTweak {
func makeServicePort(protocol v1.Protocol, targetPort int) []v1.ServicePort {
sp := v1.ServicePort{Port: 80, Protocol: protocol}
if targetPort > 0 {
- sp.TargetPort = intstr.FromInt(targetPort)
+ sp.TargetPort = intstr.FromInt32(int32(targetPort))
}
return []v1.ServicePort{sp}
}
diff --git a/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose_test.go b/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose_test.go
index f8b20067651..e624d5c1466 100644
--- a/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose_test.go
+++ b/staging/src/k8s.io/kubectl/pkg/cmd/expose/expose_test.go
@@ -1709,13 +1709,13 @@ func TestGenerateService(t *testing.T) {
Name: "port-1-tcp",
Port: 53,
Protocol: corev1.ProtocolTCP,
- TargetPort: intstr.FromInt(53),
+ TargetPort: intstr.FromInt32(53),
},
{
Name: "port-1-udp",
Port: 53,
Protocol: corev1.ProtocolUDP,
- TargetPort: intstr.FromInt(53),
+ TargetPort: intstr.FromInt32(53),
},
},
ClusterIP: corev1.ClusterIPNone,
diff --git a/staging/src/k8s.io/kubectl/pkg/generate/versioned/service.go b/staging/src/k8s.io/kubectl/pkg/generate/versioned/service.go
index 0c1beeaab8b..68b6557a641 100644
--- a/staging/src/k8s.io/kubectl/pkg/generate/versioned/service.go
+++ b/staging/src/k8s.io/kubectl/pkg/generate/versioned/service.go
@@ -196,7 +196,7 @@ func generateService(genericParams map[string]interface{}) (runtime.Object, erro
if portNum, err := strconv.Atoi(targetPortString); err != nil {
targetPort = intstr.FromString(targetPortString)
} else {
- targetPort = intstr.FromInt(portNum)
+ targetPort = intstr.FromInt32(int32(portNum))
}
// Use the same target-port for every port
for i := range service.Spec.Ports {
diff --git a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external_test.go b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external_test.go
index 7ab6513e869..8b5c6a48350 100644
--- a/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external_test.go
+++ b/staging/src/k8s.io/legacy-cloud-providers/gce/gce_loadbalancer_external_test.go
@@ -934,13 +934,13 @@ func TestFirewallNeedsUpdate(t *testing.T) {
require.NoError(t, err)
svc := fakeLoadbalancerService("")
svc.Spec.Ports = []v1.ServicePort{
- {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt(80)},
- {Name: "port2", Protocol: v1.ProtocolTCP, Port: int32(81), TargetPort: intstr.FromInt(81)},
- {Name: "port3", Protocol: v1.ProtocolTCP, Port: int32(82), TargetPort: intstr.FromInt(82)},
- {Name: "port4", Protocol: v1.ProtocolTCP, Port: int32(84), TargetPort: intstr.FromInt(84)},
- {Name: "port5", Protocol: v1.ProtocolTCP, Port: int32(85), TargetPort: intstr.FromInt(85)},
- {Name: "port6", Protocol: v1.ProtocolTCP, Port: int32(86), TargetPort: intstr.FromInt(86)},
- {Name: "port7", Protocol: v1.ProtocolTCP, Port: int32(88), TargetPort: intstr.FromInt(87)},
+ {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt32(80)},
+ {Name: "port2", Protocol: v1.ProtocolTCP, Port: int32(81), TargetPort: intstr.FromInt32(81)},
+ {Name: "port3", Protocol: v1.ProtocolTCP, Port: int32(82), TargetPort: intstr.FromInt32(82)},
+ {Name: "port4", Protocol: v1.ProtocolTCP, Port: int32(84), TargetPort: intstr.FromInt32(84)},
+ {Name: "port5", Protocol: v1.ProtocolTCP, Port: int32(85), TargetPort: intstr.FromInt32(85)},
+ {Name: "port6", Protocol: v1.ProtocolTCP, Port: int32(86), TargetPort: intstr.FromInt32(86)},
+ {Name: "port7", Protocol: v1.ProtocolTCP, Port: int32(88), TargetPort: intstr.FromInt32(87)},
}
status, err := createExternalLoadBalancer(gce, svc, []string{"test-node-1"}, vals.ClusterName, vals.ClusterID, vals.ZoneName)
@@ -1643,7 +1643,7 @@ func TestFirewallObject(t *testing.T) {
desc: "empty source ranges",
sourceRanges: utilnet.IPNetSet{},
svcPorts: []v1.ServicePort{
- {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt(80)},
+ {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt32(80)},
},
expectedFirewall: func(fw compute.Firewall) compute.Firewall {
return fw
@@ -1653,7 +1653,7 @@ func TestFirewallObject(t *testing.T) {
desc: "has source ranges",
sourceRanges: sourceRanges,
svcPorts: []v1.ServicePort{
- {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt(80)},
+ {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt32(80)},
},
expectedFirewall: func(fw compute.Firewall) compute.Firewall {
fw.SourceRanges = srcRanges
@@ -1665,7 +1665,7 @@ func TestFirewallObject(t *testing.T) {
sourceRanges: utilnet.IPNetSet{},
destinationIP: dstIP,
svcPorts: []v1.ServicePort{
- {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt(80)},
+ {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt32(80)},
},
expectedFirewall: func(fw compute.Firewall) compute.Firewall {
fw.DestinationRanges = []string{dstIP}
@@ -1676,9 +1676,9 @@ func TestFirewallObject(t *testing.T) {
desc: "has multiple ports",
sourceRanges: sourceRanges,
svcPorts: []v1.ServicePort{
- {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt(80)},
- {Name: "port2", Protocol: v1.ProtocolTCP, Port: int32(82), TargetPort: intstr.FromInt(82)},
- {Name: "port3", Protocol: v1.ProtocolTCP, Port: int32(84), TargetPort: intstr.FromInt(84)},
+ {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt32(80)},
+ {Name: "port2", Protocol: v1.ProtocolTCP, Port: int32(82), TargetPort: intstr.FromInt32(82)},
+ {Name: "port3", Protocol: v1.ProtocolTCP, Port: int32(84), TargetPort: intstr.FromInt32(84)},
},
expectedFirewall: func(fw compute.Firewall) compute.Firewall {
fw.Allowed = []*compute.FirewallAllowed{
@@ -1695,13 +1695,13 @@ func TestFirewallObject(t *testing.T) {
desc: "has multiple ports",
sourceRanges: sourceRanges,
svcPorts: []v1.ServicePort{
- {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt(80)},
- {Name: "port2", Protocol: v1.ProtocolTCP, Port: int32(81), TargetPort: intstr.FromInt(81)},
- {Name: "port3", Protocol: v1.ProtocolTCP, Port: int32(82), TargetPort: intstr.FromInt(82)},
- {Name: "port4", Protocol: v1.ProtocolTCP, Port: int32(84), TargetPort: intstr.FromInt(84)},
- {Name: "port5", Protocol: v1.ProtocolTCP, Port: int32(85), TargetPort: intstr.FromInt(85)},
- {Name: "port6", Protocol: v1.ProtocolTCP, Port: int32(86), TargetPort: intstr.FromInt(86)},
- {Name: "port7", Protocol: v1.ProtocolTCP, Port: int32(88), TargetPort: intstr.FromInt(87)},
+ {Name: "port1", Protocol: v1.ProtocolTCP, Port: int32(80), TargetPort: intstr.FromInt32(80)},
+ {Name: "port2", Protocol: v1.ProtocolTCP, Port: int32(81), TargetPort: intstr.FromInt32(81)},
+ {Name: "port3", Protocol: v1.ProtocolTCP, Port: int32(82), TargetPort: intstr.FromInt32(82)},
+ {Name: "port4", Protocol: v1.ProtocolTCP, Port: int32(84), TargetPort: intstr.FromInt32(84)},
+ {Name: "port5", Protocol: v1.ProtocolTCP, Port: int32(85), TargetPort: intstr.FromInt32(85)},
+ {Name: "port6", Protocol: v1.ProtocolTCP, Port: int32(86), TargetPort: intstr.FromInt32(86)},
+ {Name: "port7", Protocol: v1.ProtocolTCP, Port: int32(88), TargetPort: intstr.FromInt32(87)},
},
expectedFirewall: func(fw compute.Firewall) compute.Firewall {
fw.Allowed = []*compute.FirewallAllowed{
diff --git a/test/e2e/apps/deployment.go b/test/e2e/apps/deployment.go
index 9e9603271e4..1dc86de1673 100644
--- a/test/e2e/apps/deployment.go
+++ b/test/e2e/apps/deployment.go
@@ -671,7 +671,7 @@ func failureTrap(ctx context.Context, c clientset.Interface, ns string) {
}
func intOrStrP(num int) *intstr.IntOrString {
- intstr := intstr.FromInt(num)
+ intstr := intstr.FromInt32(int32(num))
return &intstr
}
diff --git a/test/e2e/autoscaling/cluster_size_autoscaling.go b/test/e2e/autoscaling/cluster_size_autoscaling.go
index b1fe0db5cd2..9dc971956fe 100644
--- a/test/e2e/autoscaling/cluster_size_autoscaling.go
+++ b/test/e2e/autoscaling/cluster_size_autoscaling.go
@@ -1035,7 +1035,7 @@ func runDrainTest(ctx context.Context, f *framework.Framework, migSizes map[stri
ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, namespace, "reschedulable-pods")
ginkgo.By("Create a PodDisruptionBudget")
- minAvailable := intstr.FromInt(numPods - pdbSize)
+ minAvailable := intstr.FromInt32(int32(numPods - pdbSize))
pdb := &policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: "test_pdb",
@@ -1915,7 +1915,7 @@ func addKubeSystemPdbs(ctx context.Context, f *framework.Framework) error {
ginkgo.By(fmt.Sprintf("Create PodDisruptionBudget for %v", pdbData.label))
labelMap := map[string]string{"k8s-app": pdbData.label}
pdbName := fmt.Sprintf("test-pdb-for-%v", pdbData.label)
- minAvailable := intstr.FromInt(pdbData.minAvailable)
+ minAvailable := intstr.FromInt32(int32(pdbData.minAvailable))
pdb := &policyv1.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: pdbName,
diff --git a/test/e2e/common/node/container_probe.go b/test/e2e/common/node/container_probe.go
index c250ee06a9e..4434078841a 100644
--- a/test/e2e/common/node/container_probe.go
+++ b/test/e2e/common/node/container_probe.go
@@ -1196,7 +1196,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContain
ProbeHandler: v1.ProbeHandler{
HTTPGet: &v1.HTTPGetAction{
Path: "/healthz",
- Port: intstr.FromInt(8080),
+ Port: intstr.FromInt32(8080),
},
},
InitialDelaySeconds: 10,
@@ -1660,7 +1660,7 @@ func httpGetHandler(path string, port int) v1.ProbeHandler {
return v1.ProbeHandler{
HTTPGet: &v1.HTTPGetAction{
Path: path,
- Port: intstr.FromInt(port),
+ Port: intstr.FromInt32(int32(port)),
},
}
}
@@ -1668,7 +1668,7 @@ func httpGetHandler(path string, port int) v1.ProbeHandler {
func tcpSocketHandler(port int) v1.ProbeHandler {
return v1.ProbeHandler{
TCPSocket: &v1.TCPSocketAction{
- Port: intstr.FromInt(port),
+ Port: intstr.FromInt32(int32(port)),
},
}
}
diff --git a/test/e2e/common/node/lifecycle_hook.go b/test/e2e/common/node/lifecycle_hook.go
index a562a3a00a0..eec1b977e2f 100644
--- a/test/e2e/common/node/lifecycle_hook.go
+++ b/test/e2e/common/node/lifecycle_hook.go
@@ -404,7 +404,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContain
HTTPGet: &v1.HTTPGetAction{
Path: "/echo?msg=poststart",
Host: targetIP,
- Port: intstr.FromInt(8080),
+ Port: intstr.FromInt32(8080),
},
},
}
@@ -432,7 +432,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContain
Scheme: v1.URISchemeHTTPS,
Path: "/echo?msg=poststart",
Host: targetIP,
- Port: intstr.FromInt(9090),
+ Port: intstr.FromInt32(9090),
},
},
}
@@ -459,7 +459,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContain
HTTPGet: &v1.HTTPGetAction{
Path: "/echo?msg=prestop",
Host: targetIP,
- Port: intstr.FromInt(8080),
+ Port: intstr.FromInt32(8080),
},
},
}
@@ -487,7 +487,7 @@ var _ = SIGDescribe("[NodeAlphaFeature:SidecarContainers][Feature:SidecarContain
Scheme: v1.URISchemeHTTPS,
Path: "/echo?msg=prestop",
Host: targetIP,
- Port: intstr.FromInt(9090),
+ Port: intstr.FromInt32(9090),
},
},
}
diff --git a/test/e2e/common/util.go b/test/e2e/common/util.go
index e135275d479..1dfeda466b8 100644
--- a/test/e2e/common/util.go
+++ b/test/e2e/common/util.go
@@ -132,7 +132,7 @@ func svcByName(name string, port int) *v1.Service {
},
Ports: []v1.ServicePort{{
Port: int32(port),
- TargetPort: intstr.FromInt(port),
+ TargetPort: intstr.FromInt32(int32(port)),
}},
},
}
diff --git a/test/e2e/framework/autoscaling/autoscaling_utils.go b/test/e2e/framework/autoscaling/autoscaling_utils.go
index 632c61cf7a5..d52ce3bb07e 100644
--- a/test/e2e/framework/autoscaling/autoscaling_utils.go
+++ b/test/e2e/framework/autoscaling/autoscaling_utils.go
@@ -585,7 +585,7 @@ func createService(ctx context.Context, c clientset.Interface, name, ns string,
Spec: v1.ServiceSpec{
Ports: []v1.ServicePort{{
Port: port,
- TargetPort: intstr.FromInt(targetPort),
+ TargetPort: intstr.FromInt32(int32(targetPort)),
}},
Selector: selectors,
},
diff --git a/test/e2e/framework/network/utils.go b/test/e2e/framework/network/utils.go
index e4f733a57e6..914218f9ef2 100644
--- a/test/e2e/framework/network/utils.go
+++ b/test/e2e/framework/network/utils.go
@@ -688,8 +688,8 @@ func (config *NetworkingTestConfig) createNodePortServiceSpec(svcName string, se
Spec: v1.ServiceSpec{
Type: v1.ServiceTypeNodePort,
Ports: []v1.ServicePort{
- {Port: ClusterHTTPPort, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt(EndpointHTTPPort)},
- {Port: ClusterUDPPort, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt(EndpointUDPPort)},
+ {Port: ClusterHTTPPort, Name: "http", Protocol: v1.ProtocolTCP, TargetPort: intstr.FromInt32(EndpointHTTPPort)},
+ {Port: ClusterUDPPort, Name: "udp", Protocol: v1.ProtocolUDP, TargetPort: intstr.FromInt32(EndpointUDPPort)},
},
Selector: selector,
SessionAffinity: sessionAffinity,
@@ -697,7 +697,7 @@ func (config *NetworkingTestConfig) createNodePortServiceSpec(svcName string, se
}
if config.SCTPEnabled {
- res.Spec.Ports = append(res.Spec.Ports, v1.ServicePort{Port: ClusterSCTPPort, Name: "sctp", Protocol: v1.ProtocolSCTP, TargetPort: intstr.FromInt(EndpointSCTPPort)})
+ res.Spec.Ports = append(res.Spec.Ports, v1.ServicePort{Port: ClusterSCTPPort, Name: "sctp", Protocol: v1.ProtocolSCTP, TargetPort: intstr.FromInt32(EndpointSCTPPort)})
}
if config.DualStackEnabled {
requireDual := v1.IPFamilyPolicyRequireDualStack
diff --git a/test/e2e/framework/service/resource.go b/test/e2e/framework/service/resource.go
index 88b9c9e9904..663fe8c11d4 100644
--- a/test/e2e/framework/service/resource.go
+++ b/test/e2e/framework/service/resource.go
@@ -148,7 +148,7 @@ func CreateServiceForSimpleApp(ctx context.Context, c clientset.Interface, contP
return []v1.ServicePort{{
Protocol: v1.ProtocolTCP,
Port: int32(svcPort),
- TargetPort: intstr.FromInt(contPort),
+ TargetPort: intstr.FromInt32(int32(contPort)),
}}
}
framework.Logf("Creating a service-for-%v for selecting app=%v-pod", appName, appName)
diff --git a/test/e2e/network/dns_common.go b/test/e2e/network/dns_common.go
index a2060594964..cd3b1443995 100644
--- a/test/e2e/network/dns_common.go
+++ b/test/e2e/network/dns_common.go
@@ -222,7 +222,7 @@ func (t *dnsTestCommon) createUtilPodLabel(ctx context.Context, baseName string)
{
Protocol: v1.ProtocolTCP,
Port: servicePort,
- TargetPort: intstr.FromInt(servicePort),
+ TargetPort: intstr.FromInt32(servicePort),
},
},
},
diff --git a/test/e2e/network/util.go b/test/e2e/network/util.go
index 11c1bbe028f..6a4e3c391b5 100644
--- a/test/e2e/network/util.go
+++ b/test/e2e/network/util.go
@@ -194,7 +194,7 @@ func createSecondNodePortService(ctx context.Context, f *framework.Framework, co
Port: e2enetwork.ClusterHTTPPort,
Name: "http",
Protocol: v1.ProtocolTCP,
- TargetPort: intstr.FromInt(e2enetwork.EndpointHTTPPort),
+ TargetPort: intstr.FromInt32(e2enetwork.EndpointHTTPPort),
},
},
Selector: config.NodePortService.Spec.Selector,
diff --git a/test/integration/deployment/util.go b/test/integration/deployment/util.go
index bc9d74a7ea0..248923781fd 100644
--- a/test/integration/deployment/util.go
+++ b/test/integration/deployment/util.go
@@ -187,7 +187,7 @@ func markPodReady(c clientset.Interface, ns string, pod *v1.Pod) error {
}
func intOrStrP(num int) *intstr.IntOrString {
- intstr := intstr.FromInt(num)
+ intstr := intstr.FromInt32(int32(num))
return &intstr
}
diff --git a/test/integration/scheduler/preemption/preemption_test.go b/test/integration/scheduler/preemption/preemption_test.go
index d859a6152cd..c57d83247ac 100644
--- a/test/integration/scheduler/preemption/preemption_test.go
+++ b/test/integration/scheduler/preemption/preemption_test.go
@@ -1231,7 +1231,7 @@ func TestNominatedNodeCleanUp(t *testing.T) {
}
func mkMinAvailablePDB(name, namespace string, uid types.UID, minAvailable int, matchLabels map[string]string) *policy.PodDisruptionBudget {
- intMinAvailable := intstr.FromInt(minAvailable)
+ intMinAvailable := intstr.FromInt32(int32(minAvailable))
return &policy.PodDisruptionBudget{
ObjectMeta: metav1.ObjectMeta{
Name: name,
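Taken together, the mechanical part of this change is a one-for-one swap of the deprecated intstr.FromInt(int) for intstr.FromInt32(int32), with an explicit int32 conversion wherever the caller still holds a plain int, plus the Parse change that now only treats values fitting into 32 bits as integers. A minimal standalone sketch of the new call pattern (variable names illustrative):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/intstr"
)

func main() {
	port := 8080 // caller still holds a plain int
	// Deprecated: intstr.FromInt(port)
	// Replacement: convert explicitly and use FromInt32.
	target := intstr.FromInt32(int32(port))
	fmt.Println(target.IntValue()) // 8080

	// Parse now converts via ParseInt(val, 10, 32): values that do not
	// fit into an int32 (or do not parse at all) stay strings.
	fmt.Println(intstr.Parse("65535").Type == intstr.Int)         // true
	fmt.Println(intstr.Parse("http").Type == intstr.String)      // true
	fmt.Println(intstr.Parse("4294967296").Type == intstr.String) // true: overflows int32
}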
|
40
|
update to golangci-lint v1.54.1 + go-ruleguard v0.4.0
That release is the first one with official support for Go 1.21.
go-ruleguard must be >= 0.3.20 because of
https://github.com/quasilyte/go-ruleguard/issues/449, which affects Go 1.21.
golangci-lint itself doesn't yet depend on a recent enough go-ruleguard
release, so the bump was done manually.
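(For illustration only: a manual bump of an indirect dependency in
hack/tools would typically look something like the commands below; the
exact invocation is an assumption, not part of this commit.)

cd hack/tools
go get github.com/quasilyte/[email protected]
go mod tidy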
|
2023-08-18
| null |
index edef998ea4a..d4661e38b2d 100644
--- a/hack/tools/go.mod
+++ b/hack/tools/go.mod
@@ -7,7 +7,7 @@ require (
github.com/cespare/prettybench v0.0.0-20150116022406-03b8cfe5406c
github.com/client9/misspell v0.3.4
github.com/golang/mock v1.6.0
- github.com/golangci/golangci-lint v1.53.3
+ github.com/golangci/golangci-lint v1.54.1
github.com/google/go-flow-levee v0.1.5
go.uber.org/automaxprocs v1.5.2
gotest.tools/gotestsum v1.6.4
@@ -18,19 +18,19 @@ require (
require (
4d63.com/gocheckcompilerdirectives v1.2.1 // indirect
4d63.com/gochecknoglobals v0.2.1 // indirect
- github.com/4meepo/tagalign v1.2.2 // indirect
- github.com/Abirdcfly/dupword v0.0.11 // indirect
+ github.com/4meepo/tagalign v1.3.2 // indirect
+ github.com/Abirdcfly/dupword v0.0.12 // indirect
github.com/Antonboom/errname v0.1.10 // indirect
github.com/Antonboom/nilnil v0.1.5 // indirect
github.com/BurntSushi/toml v1.3.2 // indirect
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 // indirect
- github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 // indirect
+ github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 // indirect
github.com/Masterminds/semver v1.5.0 // indirect
github.com/OpenPeeDeeP/depguard/v2 v2.1.0 // indirect
github.com/alexkohler/nakedret/v2 v2.0.2 // indirect
github.com/alexkohler/prealloc v1.0.0 // indirect
github.com/alingse/asasalint v0.0.11 // indirect
- github.com/ashanbrown/forbidigo v1.5.3 // indirect
+ github.com/ashanbrown/forbidigo v1.6.0 // indirect
github.com/ashanbrown/makezero v1.1.1 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bkielbasa/cyclop v1.2.1 // indirect
@@ -44,7 +44,7 @@ require (
github.com/charithe/durationcheck v0.0.10 // indirect
github.com/chavacava/garif v0.0.0-20230227094218-b8c73b2037b8 // indirect
github.com/curioswitch/go-reassign v0.2.0 // indirect
- github.com/daixiang0/gci v0.10.1 // indirect
+ github.com/daixiang0/gci v0.11.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/denis-tingaikin/go-header v0.4.3 // indirect
github.com/dnephin/pflag v1.0.7 // indirect
@@ -55,7 +55,7 @@ require (
github.com/firefart/nonamedreturns v1.0.4 // indirect
github.com/fsnotify/fsnotify v1.5.4 // indirect
github.com/fzipp/gocyclo v0.6.0 // indirect
- github.com/go-critic/go-critic v0.8.1 // indirect
+ github.com/go-critic/go-critic v0.9.0 // indirect
github.com/go-toolsmith/astcast v1.1.0 // indirect
github.com/go-toolsmith/astcopy v1.1.0 // indirect
github.com/go-toolsmith/astequal v1.1.0 // indirect
@@ -73,7 +73,7 @@ require (
github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 // indirect
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 // indirect
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca // indirect
- github.com/golangci/misspell v0.4.0 // indirect
+ github.com/golangci/misspell v0.4.1 // indirect
github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 // indirect
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 // indirect
github.com/google/go-cmp v0.5.9 // indirect
@@ -98,7 +98,7 @@ require (
github.com/kisielk/gotool v1.0.0 // indirect
github.com/kkHAIKE/contextcheck v1.1.4 // indirect
github.com/kulti/thelper v0.6.3 // indirect
- github.com/kunwardeep/paralleltest v1.0.7 // indirect
+ github.com/kunwardeep/paralleltest v1.0.8 // indirect
github.com/kyoh86/exportloopref v0.1.11 // indirect
github.com/ldez/gomoddirectives v0.2.3 // indirect
github.com/ldez/tagliatelle v0.5.0 // indirect
@@ -121,18 +121,18 @@ require (
github.com/nbutton23/zxcvbn-go v0.0.0-20210217022336-fa2cb2858354 // indirect
github.com/nishanths/exhaustive v0.11.0 // indirect
github.com/nishanths/predeclared v0.2.2 // indirect
- github.com/nunnatsa/ginkgolinter v0.12.1 // indirect
+ github.com/nunnatsa/ginkgolinter v0.13.3 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/pelletier/go-toml v1.9.5 // indirect
github.com/pelletier/go-toml/v2 v2.0.5 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
- github.com/polyfloyd/go-errorlint v1.4.2 // indirect
+ github.com/polyfloyd/go-errorlint v1.4.3 // indirect
github.com/prometheus/client_golang v1.12.1 // indirect
github.com/prometheus/client_model v0.2.0 // indirect
github.com/prometheus/common v0.32.1 // indirect
github.com/prometheus/procfs v0.7.3 // indirect
- github.com/quasilyte/go-ruleguard v0.3.19 // indirect
+ github.com/quasilyte/go-ruleguard v0.4.0 // indirect
github.com/quasilyte/gogrep v0.5.0 // indirect
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 // indirect
github.com/quasilyte/stdinfo v0.0.0-20220114132959-f7386bf02567 // indirect
@@ -167,27 +167,27 @@ require (
github.com/timonwong/loggercheck v0.9.4 // indirect
github.com/tomarrell/wrapcheck/v2 v2.8.1 // indirect
github.com/tommy-muehle/go-mnd/v2 v2.5.1 // indirect
- github.com/ultraware/funlen v0.0.3 // indirect
+ github.com/ultraware/funlen v0.1.0 // indirect
github.com/ultraware/whitespace v0.0.5 // indirect
- github.com/uudashr/gocognit v1.0.6 // indirect
+ github.com/uudashr/gocognit v1.0.7 // indirect
github.com/xen0n/gosmopolitan v1.2.1 // indirect
github.com/yagipy/maintidx v1.0.0 // indirect
github.com/yeya24/promlinter v0.2.0 // indirect
- github.com/ykadowak/zerologlint v0.1.2 // indirect
- gitlab.com/bosi/decorder v0.2.3 // indirect
- go.tmz.dev/musttag v0.7.0 // indirect
+ github.com/ykadowak/zerologlint v0.1.3 // indirect
+ gitlab.com/bosi/decorder v0.4.0 // indirect
+ go.tmz.dev/musttag v0.7.1 // indirect
go.uber.org/atomic v1.7.0 // indirect
go.uber.org/multierr v1.6.0 // indirect
go.uber.org/zap v1.24.0 // indirect
- golang.org/x/crypto v0.9.0 // indirect
+ golang.org/x/crypto v0.11.0 // indirect
golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea // indirect
- golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 // indirect
- golang.org/x/mod v0.10.0 // indirect
- golang.org/x/sync v0.2.0 // indirect
- golang.org/x/sys v0.8.0 // indirect
- golang.org/x/term v0.8.0 // indirect
- golang.org/x/text v0.9.0 // indirect
- golang.org/x/tools v0.9.3 // indirect
+ golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 // indirect
+ golang.org/x/mod v0.12.0 // indirect
+ golang.org/x/sync v0.3.0 // indirect
+ golang.org/x/sys v0.11.0 // indirect
+ golang.org/x/term v0.10.0 // indirect
+ golang.org/x/text v0.11.0 // indirect
+ golang.org/x/tools v0.12.0 // indirect
golang.org/x/tools/go/pointer v0.1.0-deprecated // indirect
google.golang.org/protobuf v1.28.0 // indirect
gopkg.in/ini.v1 v1.67.0 // indirect
diff --git a/hack/tools/go.sum b/hack/tools/go.sum
index e757b513e37..4e63a9b4318 100644
--- a/hack/tools/go.sum
+++ b/hack/tools/go.sum
@@ -41,10 +41,10 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20201218220906-28db891af037/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
-github.com/4meepo/tagalign v1.2.2 h1:kQeUTkFTaBRtd/7jm8OKJl9iHk0gAO+TDFPHGSna0aw=
-github.com/4meepo/tagalign v1.2.2/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE=
-github.com/Abirdcfly/dupword v0.0.11 h1:z6v8rMETchZXUIuHxYNmlUAuKuB21PeaSymTed16wgU=
-github.com/Abirdcfly/dupword v0.0.11/go.mod h1:wH8mVGuf3CP5fsBTkfWwwwKTjDnVVCxtU8d8rgeVYXA=
+github.com/4meepo/tagalign v1.3.2 h1:1idD3yxlRGV18VjqtDbqYvQ5pXqQS0wO2dn6M3XstvI=
+github.com/4meepo/tagalign v1.3.2/go.mod h1:Q9c1rYMZJc9dPRkbQPpcBNCLEmY2njbAsXhQOZFE2dE=
+github.com/Abirdcfly/dupword v0.0.12 h1:56NnOyrXzChj07BDFjeRA+IUzSz01jmzEq+G4kEgFhc=
+github.com/Abirdcfly/dupword v0.0.12/go.mod h1:+us/TGct/nI9Ndcbcp3rgNcQzctTj68pq7TcgNpLfdI=
github.com/Antonboom/errname v0.1.10 h1:RZ7cYo/GuZqjr1nuJLNe8ZH+a+Jd9DaZzttWzak9Bls=
github.com/Antonboom/errname v0.1.10/go.mod h1:xLeiCIrvVNpUtsN0wxAh05bNIZpqE22/qDMnTBTttiA=
github.com/Antonboom/nilnil v0.1.5 h1:X2JAdEVcbPaOom2TUa1FxZ3uyuUlex0XMLGYMemu6l0=
@@ -55,8 +55,8 @@ github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbi
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24 h1:sHglBQTwgx+rWPdisA5ynNEsoARbiCBOyGcJM4/OzsM=
github.com/Djarvur/go-err113 v0.0.0-20210108212216-aea10b59be24/go.mod h1:4UJr5HIiMZrwgkSPdsjy2uOQExX/WEILpIrO9UPGuXs=
-github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0 h1:+r1rSv4gvYn0wmRjC8X7IAzX8QezqtFV9m0MUHFJgts=
-github.com/GaijinEntertainment/go-exhaustruct/v2 v2.3.0/go.mod h1:b3g59n2Y+T5xmcxJL+UEG2f8cQploZm1mR/v6BW0mU0=
+github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0 h1:3ZBs7LAezy8gh0uECsA6CGU43FF3zsx5f4eah5FxTMA=
+github.com/GaijinEntertainment/go-exhaustruct/v3 v3.1.0/go.mod h1:rZLTje5A9kFBe0pzhpe2TdhRniBF++PRHQuRpR8esVc=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/OpenPeeDeeP/depguard/v2 v2.1.0 h1:aQl70G173h/GZYhWf36aE5H0KaujXfVMnn/f1kSDVYY=
@@ -74,8 +74,8 @@ github.com/alingse/asasalint v0.0.11 h1:SFwnQXJ49Kx/1GghOFz1XGqHYKp21Kq1nHad/0WQ
github.com/alingse/asasalint v0.0.11/go.mod h1:nCaoMhw7a9kSJObvQyVzNTPBDbNpdocqrSP7t/cW5+I=
github.com/aojea/sloppy-netparser v0.0.0-20210819225411-1b3bd8b3b975 h1:3bpBhtHNVCpJiyO1r7w0BjGhQPPk2eD1ZsVAVS5vmiE=
github.com/aojea/sloppy-netparser v0.0.0-20210819225411-1b3bd8b3b975/go.mod h1:VP81Qd6FKAazakPswOou8ULXGU/j5QH0VcGPzehHx3s=
-github.com/ashanbrown/forbidigo v1.5.3 h1:jfg+fkm/snMx+V9FBwsl1d340BV/99kZGv5jN9hBoXk=
-github.com/ashanbrown/forbidigo v1.5.3/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
+github.com/ashanbrown/forbidigo v1.6.0 h1:D3aewfM37Yb3pxHujIPSpTf6oQk9sc9WZi8gerOIVIY=
+github.com/ashanbrown/forbidigo v1.6.0/go.mod h1:Y8j9jy9ZYAEHXdu723cUlraTqbzjKF1MUyfOKL+AjcU=
github.com/ashanbrown/makezero v1.1.1 h1:iCQ87C0V0vSyO+M9E/FZYbu65auqH0lnsOkf5FcB28s=
github.com/ashanbrown/makezero v1.1.1/go.mod h1:i1bJLCRSCHOcOa9Y6MyF2FTfMZMFdHvxKHxgO5Z1axI=
github.com/benbjohnson/clock v1.1.0 h1:Q92kusRqC1XV2MjkWETPvjJVqKetz1OzxZB7mHJLju8=
@@ -118,8 +118,8 @@ github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnht
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/curioswitch/go-reassign v0.2.0 h1:G9UZyOcpk/d7Gd6mqYgd8XYWFMw/znxwGDUstnC9DIo=
github.com/curioswitch/go-reassign v0.2.0/go.mod h1:x6OpXuWvgfQaMGks2BZybTngWjT84hqJfKoO8Tt/Roc=
-github.com/daixiang0/gci v0.10.1 h1:eheNA3ljF6SxnPD/vE4lCBusVHmV3Rs3dkKvFrJ7MR0=
-github.com/daixiang0/gci v0.10.1/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
+github.com/daixiang0/gci v0.11.0 h1:XeQbFKkCRxvVyn06EOuNY6LPGBLVuB/W130c8FrnX6A=
+github.com/daixiang0/gci v0.11.0/go.mod h1:xtHP9N7AHdNvtRNfcx9gwTDfw7FRJx4bZUsiEfiNNAI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -150,8 +150,8 @@ github.com/fsnotify/fsnotify v1.5.4 h1:jRbGcIw6P2Meqdwuo0H1p6JVLbL5DHKAKlYndzMwV
github.com/fsnotify/fsnotify v1.5.4/go.mod h1:OVB6XrOHzAwXMpEM7uPOzcehqUV2UqJxmVXmkdnm1bU=
github.com/fzipp/gocyclo v0.6.0 h1:lsblElZG7d3ALtGMx9fmxeTKZaLLpU8mET09yN4BBLo=
github.com/fzipp/gocyclo v0.6.0/go.mod h1:rXPyn8fnlpa0R2csP/31uerbiVBugk5whMdlyaLkLoA=
-github.com/go-critic/go-critic v0.8.1 h1:16omCF1gN3gTzt4j4J6fKI/HnRojhEp+Eks6EuKw3vw=
-github.com/go-critic/go-critic v0.8.1/go.mod h1:kpzXl09SIJX1cr9TB/g/sAG+eFEl7ZS9f9cqvZtyNl0=
+github.com/go-critic/go-critic v0.9.0 h1:Pmys9qvU3pSML/3GEQ2Xd9RZ/ip+aXHKILuxczKGV/U=
+github.com/go-critic/go-critic v0.9.0/go.mod h1:5P8tdXL7m/6qnyG6oRAlYLORvoXH0WDypYgAEmagT40=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
@@ -227,14 +227,14 @@ github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe h1:6RGUuS7EGotKx6
github.com/golangci/go-misc v0.0.0-20220329215616-d24fe342adfe/go.mod h1:gjqyPShc/m8pEMpk0a3SeagVb0kaqvhscv+i9jI5ZhQ=
github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2 h1:amWTbTGqOZ71ruzrdA+Nx5WA3tV1N0goTspwmKCQvBY=
github.com/golangci/gofmt v0.0.0-20220901101216-f2edd75033f2/go.mod h1:9wOXstvyDRshQ9LggQuzBCGysxs3b6Uo/1MvYCR2NMs=
-github.com/golangci/golangci-lint v1.53.3 h1:CUcRafczT4t1F+mvdkUm6KuOpxUZTl0yWN/rSU6sSMo=
-github.com/golangci/golangci-lint v1.53.3/go.mod h1:W4Gg3ONq6p3Jl+0s/h9Gr0j7yEgHJWWZO2bHl2tBUXM=
+github.com/golangci/golangci-lint v1.54.1 h1:0qMrH1gkeIBqCZaaAm5Fwq4xys9rO/lJofHfZURIFFk=
+github.com/golangci/golangci-lint v1.54.1/go.mod h1:JK47+qksV/t2mAz9YvndwT0ZLW4A1rvDljOs3g9jblo=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0 h1:MfyDlzVjl1hoaPzPD4Gpb/QgoRfSBR0jdhwGyAWwMSA=
github.com/golangci/lint-1 v0.0.0-20191013205115-297bf364a8e0/go.mod h1:66R6K6P6VWk9I95jvqGxkqJxVWGFy9XlDwLwVz1RCFg=
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca h1:kNY3/svz5T29MYHubXix4aDDuE3RWHkPvopM/EDv/MA=
github.com/golangci/maligned v0.0.0-20180506175553-b1d89398deca/go.mod h1:tvlJhZqDe4LMs4ZHD0oMUlt9G2LWuDGoisJTBzLMV9o=
-github.com/golangci/misspell v0.4.0 h1:KtVB/hTK4bbL/S6bs64rYyk8adjmh1BygbBiaAiX+a0=
-github.com/golangci/misspell v0.4.0/go.mod h1:W6O/bwV6lGDxUCChm2ykw9NQdd5bYd1Xkjo88UcWyJc=
+github.com/golangci/misspell v0.4.1 h1:+y73iSicVy2PqyX7kmUefHusENlrP9YwuHZHPLGQj/g=
+github.com/golangci/misspell v0.4.1/go.mod h1:9mAN1quEo3DlpbaIKKyEvRxK1pwqR9s/Sea1bJCtlNI=
github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6 h1:DIPQnGy2Gv2FSA4B/hh8Q7xx3B7AIDk3DAMeHclH1vQ=
github.com/golangci/revgrep v0.0.0-20220804021717-745bb2f7c2e6/go.mod h1:0AKcRCkMoKvUvlf89F6O7H2LYdhr1zBh736mBItOdRs=
github.com/golangci/unconvert v0.0.0-20180507085042-28b1c447d1f4 h1:zwtduBRr5SSWhqsYNgcuWO2kFlpdOZbP0+yRjmvPGys=
@@ -345,8 +345,8 @@ github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kulti/thelper v0.6.3 h1:ElhKf+AlItIu+xGnI990no4cE2+XaSu1ULymV2Yulxs=
github.com/kulti/thelper v0.6.3/go.mod h1:DsqKShOvP40epevkFrvIwkCMNYxMeTNjdWL4dqWHZ6I=
-github.com/kunwardeep/paralleltest v1.0.7 h1:2uCk94js0+nVNQoHZNLBkAR1DQJrVzw6T0RMzJn55dQ=
-github.com/kunwardeep/paralleltest v1.0.7/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY=
+github.com/kunwardeep/paralleltest v1.0.8 h1:Ul2KsqtzFxTlSU7IP0JusWlLiNqQaloB9vguyjbE558=
+github.com/kunwardeep/paralleltest v1.0.8/go.mod h1:2C7s65hONVqY7Q5Efj5aLzRCNLjw2h4eMc9EcypGjcY=
github.com/kyoh86/exportloopref v0.1.11 h1:1Z0bcmTypkL3Q4k+IDHMWTcnCliEZcaPiIe0/ymEyhQ=
github.com/kyoh86/exportloopref v0.1.11/go.mod h1:qkV4UF1zGl6EkF1ox8L5t9SwyeBAZ3qLMd6up458uqA=
github.com/ldez/gomoddirectives v0.2.3 h1:y7MBaisZVDYmKvt9/l1mjNCiSA1BVn34U0ObUcJwlhA=
@@ -404,8 +404,8 @@ github.com/nishanths/exhaustive v0.11.0 h1:T3I8nUGhl/Cwu5Z2hfc92l0e04D2GEW6e0l8p
github.com/nishanths/exhaustive v0.11.0/go.mod h1:RqwDsZ1xY0dNdqHho2z6X+bgzizwbLYOWnZbbl2wLB4=
github.com/nishanths/predeclared v0.2.2 h1:V2EPdZPliZymNAn79T8RkNApBjMmVKh5XRpLm/w98Vk=
github.com/nishanths/predeclared v0.2.2/go.mod h1:RROzoN6TnGQupbC+lqggsOlcgysk3LMK/HI84Mp280c=
-github.com/nunnatsa/ginkgolinter v0.12.1 h1:vwOqb5Nu05OikTXqhvLdHCGcx5uthIYIl0t79UVrERQ=
-github.com/nunnatsa/ginkgolinter v0.12.1/go.mod h1:AK8Ab1PypVrcGUusuKD8RDcl2KgsIwvNaaxAlyHSzso=
+github.com/nunnatsa/ginkgolinter v0.13.3 h1:wEvjrzSMfDdnoWkctignX9QTf4rT9f4GkQ3uVoXBmiU=
+github.com/nunnatsa/ginkgolinter v0.13.3/go.mod h1:aTKXo8WddENYxNEFT+4ZxEgWXqlD9uMD3w9Bfw/ABEc=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo/v2 v2.9.4 h1:xR7vG4IXt5RWx6FfIjyAtsoMAtnc3C/rFXBBd2AjZwE=
@@ -427,8 +427,8 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pkg/sftp v1.13.1/go.mod h1:3HaPG6Dq1ILlpPZRO0HVMrsydcdLt6HRDccSgb87qRg=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
-github.com/polyfloyd/go-errorlint v1.4.2 h1:CU+O4181IxFDdPH6t/HT7IiDj1I7zxNi1RIUxYwn8d0=
-github.com/polyfloyd/go-errorlint v1.4.2/go.mod h1:k6fU/+fQe38ednoZS51T7gSIGQW1y94d6TkSr35OzH8=
+github.com/polyfloyd/go-errorlint v1.4.3 h1:P6NALOLV8BrWhm6PsqOraUK05E5h8IZnpXYJ+CIg+0U=
+github.com/polyfloyd/go-errorlint v1.4.3/go.mod h1:VPlWPh6hB/wruVG803SuNpLuTGNjLHYlvcdSy4RhdPA=
github.com/prashantv/gostub v1.1.0 h1:BTyx3RfQjRHnUWaGF9oQos79AlQ5k8WNktv7VGvVH4g=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
@@ -452,8 +452,8 @@ github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4O
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
-github.com/quasilyte/go-ruleguard v0.3.19 h1:tfMnabXle/HzOb5Xe9CUZYWXKfkS1KwRmZyPmD9nVcc=
-github.com/quasilyte/go-ruleguard v0.3.19/go.mod h1:lHSn69Scl48I7Gt9cX3VrbsZYvYiBYszZOZW4A+oTEw=
+github.com/quasilyte/go-ruleguard v0.4.0 h1:DyM6r+TKL+xbKB4Nm7Afd1IQh9kEUKQs2pboWGKtvQo=
+github.com/quasilyte/go-ruleguard v0.4.0/go.mod h1:Eu76Z/R8IXtViWUIHkE3p8gdH3/PKk1eh3YGfaEof10=
github.com/quasilyte/gogrep v0.5.0 h1:eTKODPXbI8ffJMN+W2aE0+oL0z/nh8/5eNdiO34SOAo=
github.com/quasilyte/gogrep v0.5.0/go.mod h1:Cm9lpz9NZjEoL1tgZ2OgeUKPIxL1meE7eo60Z6Sk+Ng=
github.com/quasilyte/regex/syntax v0.0.0-20210819130434-b3f0c404a727 h1:TCg2WBOl980XxGFEZSS6KlBGIV0diGdySzxATTWoqaU=
@@ -548,20 +548,20 @@ github.com/tomarrell/wrapcheck/v2 v2.8.1 h1:HxSqDSN0sAt0yJYsrcYVoEeyM4aI9yAm3KQp
github.com/tomarrell/wrapcheck/v2 v2.8.1/go.mod h1:/n2Q3NZ4XFT50ho6Hbxg+RV1uyo2Uow/Vdm9NQcl5SE=
github.com/tommy-muehle/go-mnd/v2 v2.5.1 h1:NowYhSdyE/1zwK9QCLeRb6USWdoif80Ie+v+yU8u1Zw=
github.com/tommy-muehle/go-mnd/v2 v2.5.1/go.mod h1:WsUAkMJMYww6l/ufffCD3m+P7LEvr8TnZn9lwVDlgzw=
-github.com/ultraware/funlen v0.0.3 h1:5ylVWm8wsNwH5aWo9438pwvsK0QiqVuUrt9bn7S/iLA=
-github.com/ultraware/funlen v0.0.3/go.mod h1:Dp4UiAus7Wdb9KUZsYWZEWiRzGuM2kXM1lPbfaF6xhA=
+github.com/ultraware/funlen v0.1.0 h1:BuqclbkY6pO+cvxoq7OsktIXZpgBSkYTQtmwhAK81vI=
+github.com/ultraware/funlen v0.1.0/go.mod h1:XJqmOQja6DpxarLj6Jj1U7JuoS8PvL4nEqDaQhy22p4=
github.com/ultraware/whitespace v0.0.5 h1:hh+/cpIcopyMYbZNVov9iSxvJU3OYQg78Sfaqzi/CzI=
github.com/ultraware/whitespace v0.0.5/go.mod h1:aVMh/gQve5Maj9hQ/hg+F75lr/X5A89uZnzAmWSineA=
-github.com/uudashr/gocognit v1.0.6 h1:2Cgi6MweCsdB6kpcVQp7EW4U23iBFQWfTXiWlyp842Y=
-github.com/uudashr/gocognit v1.0.6/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
+github.com/uudashr/gocognit v1.0.7 h1:e9aFXgKgUJrQ5+bs61zBigmj7bFJ/5cC6HmMahVzuDo=
+github.com/uudashr/gocognit v1.0.7/go.mod h1:nAIUuVBnYU7pcninia3BHOvQkpQCeO76Uscky5BOwcY=
github.com/xen0n/gosmopolitan v1.2.1 h1:3pttnTuFumELBRSh+KQs1zcz4fN6Zy7aB0xlnQSn1Iw=
github.com/xen0n/gosmopolitan v1.2.1/go.mod h1:JsHq/Brs1o050OOdmzHeOr0N7OtlnKRAGAsElF8xBQA=
github.com/yagipy/maintidx v1.0.0 h1:h5NvIsCz+nRDapQ0exNv4aJ0yXSI0420omVANTv3GJM=
github.com/yagipy/maintidx v1.0.0/go.mod h1:0qNf/I/CCZXSMhsRsrEPDZ+DkekpKLXAJfsTACwgXLk=
github.com/yeya24/promlinter v0.2.0 h1:xFKDQ82orCU5jQujdaD8stOHiv8UN68BSdn2a8u8Y3o=
github.com/yeya24/promlinter v0.2.0/go.mod h1:u54lkmBOZrpEbQQ6gox2zWKKLKu2SGe+2KOiextY+IA=
-github.com/ykadowak/zerologlint v0.1.2 h1:Um4P5RMmelfjQqQJKtE8ZW+dLZrXrENeIzWWKw800U4=
-github.com/ykadowak/zerologlint v0.1.2/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg=
+github.com/ykadowak/zerologlint v0.1.3 h1:TLy1dTW3Nuc+YE3bYRPToG1Q9Ej78b5UUN6bjbGdxPE=
+github.com/ykadowak/zerologlint v0.1.3/go.mod h1:KaUskqF3e/v59oPmdq1U1DnKcuHokl2/K1U4pmIELKg=
github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
@@ -569,8 +569,8 @@ github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9dec
github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
-gitlab.com/bosi/decorder v0.2.3 h1:gX4/RgK16ijY8V+BRQHAySfQAb354T7/xQpDB2n10P0=
-gitlab.com/bosi/decorder v0.2.3/go.mod h1:9K1RB5+VPNQYtXtTDAzd2OEftsZb1oV0IrJrzChSdGE=
+gitlab.com/bosi/decorder v0.4.0 h1:HWuxAhSxIvsITcXeP+iIRg9d1cVfvVkmlF7M68GaoDY=
+gitlab.com/bosi/decorder v0.4.0/go.mod h1:xarnteyUoJiOTEldDysquWKTVDCKo2TOIOIibSuWqOg=
go-simpler.org/assert v0.5.0 h1:+5L/lajuQtzmbtEfh69sr5cRf2/xZzyJhFjoOz/PPqs=
go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU=
go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8=
@@ -578,8 +578,8 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
-go.tmz.dev/musttag v0.7.0 h1:QfytzjTWGXZmChoX0L++7uQN+yRCPfyFm+whsM+lfGc=
-go.tmz.dev/musttag v0.7.0/go.mod h1:oTFPvgOkJmp5kYL02S8+jrH0eLrBIl57rzWeA26zDEM=
+go.tmz.dev/musttag v0.7.1 h1:9lFmeSFnFfPuMq4IksHGomItE6NgKMNW2Nt2FPOhCfU=
+go.tmz.dev/musttag v0.7.1/go.mod h1:oJLkpR56EsIryktZJk/B0IroSMi37YWver47fibGh5U=
go.uber.org/atomic v1.7.0 h1:ADUqmZGgLDDfbSL9ZmPxKTybcoEYHgpYfELNoN+7hsw=
go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc=
go.uber.org/automaxprocs v1.5.2 h1:2LxUOGiR3O6tw8ui5sZa2LAaHnsviZdVOUZw4fvbnME=
@@ -599,8 +599,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw=
-golang.org/x/crypto v0.9.0 h1:LF6fAI+IutBocDJ2OT0Q1g8plpYljMZ4+lty+dsqw3g=
-golang.org/x/crypto v0.9.0/go.mod h1:yrmDGqONDYtNj3tH8X9dzUun2m2lzPa9ngI6/RUPGR0=
+golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
+golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -617,8 +617,8 @@ golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea h1:vLCWI/yYrdEHyN2JzIzPO3aaQ
golang.org/x/exp v0.0.0-20230510235704-dd950f8aeaea/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w=
golang.org/x/exp/typeparams v0.0.0-20220428152302-39d4317da171/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/exp/typeparams v0.0.0-20230203172020-98cc5a0785f9/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
-golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2 h1:J74nGeMgeFnYQJN59eFwh06jX/V8g0lB7LWpjSLxtgU=
-golang.org/x/exp/typeparams v0.0.0-20230224173230-c95f2b4c22f2/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
+golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833 h1:jWGQJV4niP+CCmFW9ekjA9Zx8vYORzOUH2/Nl5WPuLQ=
+golang.org/x/exp/typeparams v0.0.0-20230307190834-24139beb5833/go.mod h1:AbB0pIl9nAr9wVwH+Z2ZpaocVmF5I4GyWCDIsVjR0bk=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -652,8 +652,8 @@ golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91
golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
-golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk=
-golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
+golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc=
+golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -695,7 +695,7 @@ golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
-golang.org/x/net v0.10.0 h1:X2//UzNDwYmtCLn7To6G58Wr6f5ahEAQgKNzv9Y951M=
+golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -719,8 +719,8 @@ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI=
-golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
+golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -781,16 +781,16 @@ golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU=
-golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
-golang.org/x/term v0.8.0 h1:n5xxQn2i3PC0yLAbjTpNT85q/Kgzcr2gIoX9OrJUols=
-golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
+golang.org/x/term v0.10.0 h1:3R7pNqamzBraeqj/Tj8qt1aQ2HpmlC+Cx/qL/7hn4/c=
+golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -802,8 +802,8 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
-golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE=
-golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
+golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
+golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -878,8 +878,8 @@ golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
golang.org/x/tools v0.3.0/go.mod h1:/rWhSS2+zyEVwoJf8YAX6L2f0ntZ7Kn/mGgAWcipA5k=
golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
-golang.org/x/tools v0.9.3 h1:Gn1I8+64MsuTb/HpH+LmQtNas23LhUVr3rYZ0eKuaMM=
-golang.org/x/tools v0.9.3/go.mod h1:owI94Op576fPu3cIGQeHs3joujW/2Oc6MtlxbF5dfNc=
+golang.org/x/tools v0.12.0 h1:YW6HUoUmYBpwSgyaGaZq1fHjrBjX1rlpZ54T6mu2kss=
+golang.org/x/tools v0.12.0/go.mod h1:Sc0INKfu04TlqNoRA1hgpFZbhYXHPr4V5DzpSBTPqQM=
golang.org/x/tools/go/pointer v0.1.0-deprecated h1:PwCkqv2FT35Z4MVxR/tUlvLoL0TkxDjShpBrE4p18Ho=
golang.org/x/tools/go/pointer v0.1.0-deprecated/go.mod h1:Jd+I2inNruJ+5VRdS+jU4S1t17z5y+UCCRa/eBRwilA=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
diff --git a/hack/verify-golangci-lint.sh b/hack/verify-golangci-lint.sh
index 7dd154ea05f..fa92a7edb91 100755
--- a/hack/verify-golangci-lint.sh
+++ b/hack/verify-golangci-lint.sh
@@ -48,6 +48,12 @@ PATH="${GOBIN}:${PATH}"
invocation=(./hack/verify-golangci-lint.sh "$@")
+# Disable warnings about the logcheck plugin using the old API
+# (https://github.com/golangci/golangci-lint/issues/4001).
+# Can be removed once logcheck gets updated to a newer release
+# which uses the new plugin API
+export GOLANGCI_LINT_HIDE_WARNING_ABOUT_PLUGIN_API_DEPRECATION=1
+
# The logcheck plugin currently has to be configured via env variables
# (https://github.com/golangci/golangci-lint/issues/1512).
#
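The exported variable only needs to reach the environment of the golangci-lint process; for reference, the same suppression works on a direct invocation (illustrative):

GOLANGCI_LINT_HIDE_WARNING_ABOUT_PLUGIN_API_DEPRECATION=1 golangci-lint run ./...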
|
1
|
Merge pull request #119937 from RyanAoh/kep-1860-dev
Make Kubernetes aware of the LoadBalancer behaviour
|
2023-08-18
| null | null |
2
|
Merge pull request #120025 from tzneal/remove-legacy-test-dependency
remove the legacy test dependency
|
2023-08-18
| null | null |
3
|
add aramase to sig-auth-encryption-at-rest-reviewers
Signed-off-by: Anish Ramasekar <[email protected]>
|
2023-08-18
| null |
index def06e4af8b..3c63423b183 100644
--- a/OWNERS_ALIASES
+++ b/OWNERS_ALIASES
@@ -63,6 +63,7 @@ aliases:
- smarterclayton
- enj
sig-auth-encryption-at-rest-reviewers:
+ - aramase
- enj
- lavalamp
- liggitt
|
4
|
Merge pull request #119874 from kannon92/pod-replacement-policy-typos
fix typos for pod replacement policy
|
2023-08-18
| null | null |
5
|
Merge pull request #119806 from enj/enj/i/delete_psp_api
Delete PSP API types and generated clients
|
2023-08-18
| null | null |
6
|
remove the legacy test dependency
This removes an import that pulled in a number of apparently old,
failing tests.
|
2023-08-18
| null |
index 417ef729b81..b1828a583ed 100644
--- a/test/e2e_node/seccompdefault_test.go
+++ b/test/e2e_node/seccompdefault_test.go
@@ -27,13 +27,19 @@ import (
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/uuid"
+ admissionapi "k8s.io/pod-security-admission/api"
+
kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
"k8s.io/kubernetes/test/e2e/framework"
e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
- "k8s.io/kubernetes/test/e2e/node"
- admissionapi "k8s.io/pod-security-admission/api"
)
+// SeccompProcStatusField is the field of /proc/$PID/status referencing the seccomp filter type.
+const SeccompProcStatusField = "Seccomp:"
+
+// ProcSelfStatusPath is the path to /proc/self/status.
+const ProcSelfStatusPath = "/proc/self/status"
+
// Serial because the test updates kubelet configuration.
var _ = SIGDescribe("SeccompDefault [Serial] [Feature:SeccompDefault] [LinuxOnly]", func() {
f := framework.NewDefaultFramework("seccompdefault-test")
@@ -54,7 +60,7 @@ var _ = SIGDescribe("SeccompDefault [Serial] [Feature:SeccompDefault] [LinuxOnly
{
Name: name,
Image: busyboxImage,
- Command: []string{"grep", node.SeccompProcStatusField, node.ProcSelfStatusPath},
+ Command: []string{"grep", SeccompProcStatusField, ProcSelfStatusPath},
SecurityContext: securityContext,
},
},
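For context, the two inlined constants drive a plain grep of the kernel's per-process status file; on a node where the kubelet applied the default seccomp profile, the container's output would be expected to look roughly like this (illustrative):

$ grep Seccomp: /proc/self/status
Seccomp:	2

where 0 means seccomp is disabled, 1 is strict mode, and 2 is filter mode.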
|
7
|
Merge pull request #119027 from MadhavJivrajani/go1.21-list-e
[prep for go1.21]: use -e in `go list`
|
2023-08-18
| null | null |
8
|
Merge pull request #119746 from SataQiu/cleanup-scheduler-20230803
using wait.PollUntilContextTimeout instead of deprecated wait.Poll for scheduler
|
2023-08-18
| null | null |
9
|
Merge pull request #119958 from SataQiu/drop-126-api-testdata
Drop v1.26.0 API testdata
|
2023-08-18
| null | null |
10
|
prep for go1.21: use -e in go list
For some reason, in go1.21, go list no longer allows importing main
packages, even when it is done only to track dependencies (which is a
valid use case); that restriction doesn't seem prudent.
The suggested workaround is to use the -e flag to permit processing of
erroneous packages, which is what this change does.
Signed-off-by: Madhav Jivrajani <[email protected]>
|
2023-08-18
| null |
index 5977037cc4e..f1c46982fa0 100755
--- a/hack/update-vendor.sh
+++ b/hack/update-vendor.sh
@@ -277,7 +277,7 @@ while IFS= read -r repo; do
echo "=== computing imports for ${repo}"
go list all
echo "=== computing tools imports for ${repo}"
- go list -tags=tools all
+ go list -e -tags=tools all
}
# capture module dependencies
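The difference is easy to reproduce outside the script (illustrative; the exact error wording depends on the Go version):

# go1.21 refuses to list a dependency graph that reaches a main package:
go list -tags=tools all       # fails: main packages are not importable
# -e reports erroneous packages but keeps going:
go list -e -tags=tools all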
|
11
|
Merge pull request #119728 from pohly/ssa-forbid-extract-calls
SSA: prevent usage of Extract calls via forbidigo
|
2023-08-18
| null | null |
12
|
Merge pull request #119489 from carlory/cleanup-e2e-common-framework-equal
e2e_common: stop using deprecated framework.ExpectEqual
|
2023-08-18
| null | null |
14
|
Merge pull request #119982 from liggitt/automated-cherry-pick-of-#119977-upstream-release-1.28
Automated cherry pick of #119977: Make gomaxprocs install optional, limit to tests
|
2023-08-18
| null | null |
15
|
Merge pull request #119562 from my-git9/proxyut
kubeadm: increase ut for cmd/kubeadm/app/phases/addons/proxy
|
2023-08-18
| null | null |
16
|
Merge pull request #119501 from Songjoy/cleanup-e2e-node-framework-equal
e2e_node: stop using deprecated framework.ExpectEqual
|
2023-08-18
| null | null |
17
|
Merge pull request #119097 from pacoxu/fix-eviction-pid
PIDPressure condition is triggered slowly on CRI-O with large PID pressure/heavy load
|
2023-08-18
| null | null |
18
|
Merge pull request #119800 from jpbetz/cost-fix
Fixes CEL estimated cost to propagate result sizes correctly
|
2023-08-18
| null | null |
19
|
Merge pull request #119197 from saschagrunert/stop-container-runtime-err
Check dbus error on container runtime start/stop
|
2023-08-18
| null | null |
20
|
Merge pull request #119974 from tzneal/bump-busybox-test-version
bump the busybox test version to resolve test failures
|
2023-08-18
| null | null |
21
|
Merge pull request #119939 from dims/kubectl-lookup-host-in-kubeconfig-when-needed
[kubectl] Lookup Host from kubeconfig when needed
|
2023-08-18
| null | null |
22
|
Merge pull request #119880 from saschagrunert/seccomp-filter
Make seccomp status checks in e2e tests more robust
|
2023-08-18
| null | null |
23
|
Merge pull request #119860 from pohly/golangci-lint-update
golangci-lint update and support for Go 1.21
|
2023-08-18
| null | null |
24
|
Merge pull request #119966 from aojea/fixfix
e2e framework util subtle bug checking endpoints
|
2023-08-18
| null | null |
26
|
Merge pull request #119745 from tsmetana/fix-local-stress-flake
Local PV Stress test: don't fail on deleting missing PV
|
2023-08-18
| null | null |
27
|
Merge pull request #119654 from p0lyn0mial/upstream-watch-list-e2e-panic
e2e/apimachinery/watchlist: stop panicking when run against unsupported cluster/environment
|
2023-08-18
| null | null |
28
|
Merge pull request #119509 from tzneal/fix-describe-node-with-sidecars
kubectl: fix describe node output when sidecars are present
|
2023-08-18
| null | null |
29
|
Merge pull request #118619 from TommyStarK/gh_113832
dynamic resource allocation: reuse gRPC connection
|
2023-08-18
| null | null |
30
|
Merge pull request #119977 from liggitt/optional-gomaxprocs
Make gomaxprocs install optional, limit to tests
|
2023-08-18
| null | null |
31
|
Make gomaxprocs install optional, limit to tests
|
2023-08-18
| null |
index 66772f08a81..983ff368e25 100755
--- a/hack/lib/golang.sh
+++ b/hack/lib/golang.sh
@@ -556,20 +556,25 @@ kube::golang::setup_env() {
# This seems to matter to some tools
export GO15VENDOREXPERIMENT=1
+}
+kube::golang::setup_gomaxprocs() {
# GOMAXPROCS by default does not reflect the number of cpu(s) available
# when running in a container, please see https://github.com/golang/go/issues/33803
- if ! command -v ncpu >/dev/null 2>&1; then
- # shellcheck disable=SC2164
- pushd "${KUBE_ROOT}/hack/tools" >/dev/null
- GO111MODULE=on go install ./ncpu
- # shellcheck disable=SC2164
- popd >/dev/null
+ if [[ -z "${GOMAXPROCS:-}" ]]; then
+ if ! command -v ncpu >/dev/null 2>&1; then
+ # shellcheck disable=SC2164
+ pushd "${KUBE_ROOT}/hack/tools" >/dev/null
+ GO111MODULE=on go install ./ncpu || echo "Will not automatically set GOMAXPROCS"
+ # shellcheck disable=SC2164
+ popd >/dev/null
+ fi
+ if command -v ncpu >/dev/null 2>&1; then
+ GOMAXPROCS=$(ncpu)
+ export GOMAXPROCS
+ kube::log::status "Set GOMAXPROCS automatically to ${GOMAXPROCS}"
+ fi
fi
-
- GOMAXPROCS=${GOMAXPROCS:-$(ncpu)}
- export GOMAXPROCS
- kube::log::status "Setting GOMAXPROCS: ${GOMAXPROCS}"
}
# This will take binaries from $GOPATH/bin and copy them to the appropriate
diff --git a/hack/make-rules/test-e2e-node.sh b/hack/make-rules/test-e2e-node.sh
index 43dde0c740f..49e3e04ac71 100755
--- a/hack/make-rules/test-e2e-node.sh
+++ b/hack/make-rules/test-e2e-node.sh
@@ -18,6 +18,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
+kube::golang::setup_gomaxprocs
# start the cache mutation detector by default so that cache mutators will be found
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
diff --git a/hack/make-rules/test.sh b/hack/make-rules/test.sh
index e9074678a8f..4aa72730d83 100755
--- a/hack/make-rules/test.sh
+++ b/hack/make-rules/test.sh
@@ -22,6 +22,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
+kube::golang::setup_gomaxprocs
# start the cache mutation detector by default so that cache mutators will be found
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
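The root cause referenced in the script comment: the Go runtime sizes GOMAXPROCS from the logical CPU count, which inside a CPU-limited container is typically the host's count (golang/go#33803); the runtime does honor an explicit GOMAXPROCS environment variable, which is the value ncpu computes. A small sketch for inspecting the effective setting:

package main

import (
	"fmt"
	"runtime"
)

// GOMAXPROCS(0) queries the current setting without changing it; it
// reflects the GOMAXPROCS env var when set, else runtime.NumCPU().
func main() {
	fmt.Println("NumCPU:    ", runtime.NumCPU())
	fmt.Println("GOMAXPROCS:", runtime.GOMAXPROCS(0))
}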
|
32
|
bump the busybox test version to resolve test failures
- bump busybox version
- specify the path to /bin/sleep to avoid calling a new shell builtin
|
2023-08-18
| null |
index 3dc786c1ee2..1043cad658c 100644
--- a/test/e2e_node/pods_lifecycle_termination_test.go
+++ b/test/e2e_node/pods_lifecycle_termination_test.go
@@ -113,7 +113,7 @@ func getSigkillTargetPod(podName string, ctnName string) *v1.Pod {
Command: []string{
"sh",
"-c",
- "trap \\"echo SIGTERM caught\\" SIGTERM SIGINT; touch /tmp/healthy; sleep 1000",
+ "trap \\"echo SIGTERM caught\\" SIGTERM SIGINT; touch /tmp/healthy; /bin/sleep 1000",
},
// Using readiness probe to guarantee signal handler registering finished
ReadinessProbe: &v1.Probe{
diff --git a/test/utils/image/manifest.go b/test/utils/image/manifest.go
index 578a0c0f4b4..d28ab6a46bb 100644
--- a/test/utils/image/manifest.go
+++ b/test/utils/image/manifest.go
@@ -238,7 +238,7 @@ func initImageConfigs(list RegistryList) (map[ImageID]Config, map[ImageID]Config
configs[AuthenticatedWindowsNanoServer] = Config{list.GcAuthenticatedRegistry, "windows-nanoserver", "v1"}
configs[APIServer] = Config{list.PromoterE2eRegistry, "sample-apiserver", "1.17.7"}
configs[AppArmorLoader] = Config{list.PromoterE2eRegistry, "apparmor-loader", "1.4"}
- configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.29-4"}
+ configs[BusyBox] = Config{list.PromoterE2eRegistry, "busybox", "1.36.1-1"}
configs[CudaVectorAdd] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "1.0"}
configs[CudaVectorAdd2] = Config{list.PromoterE2eRegistry, "cuda-vector-add", "2.3"}
configs[DistrolessIptables] = Config{list.BuildImageRegistry, "distroless-iptables", "v0.2.7"}
|
33
|
Make gomaxprocs install optional, limit to tests
|
2023-08-18
| null |
index 66772f08a81..983ff368e25 100755
--- a/hack/lib/golang.sh
+++ b/hack/lib/golang.sh
@@ -556,20 +556,25 @@ kube::golang::setup_env() {
# This seems to matter to some tools
export GO15VENDOREXPERIMENT=1
+}
+kube::golang::setup_gomaxprocs() {
# GOMAXPROCS by default does not reflect the number of cpu(s) available
# when running in a container, please see https://github.com/golang/go/issues/33803
- if ! command -v ncpu >/dev/null 2>&1; then
- # shellcheck disable=SC2164
- pushd "${KUBE_ROOT}/hack/tools" >/dev/null
- GO111MODULE=on go install ./ncpu
- # shellcheck disable=SC2164
- popd >/dev/null
+ if [[ -z "${GOMAXPROCS:-}" ]]; then
+ if ! command -v ncpu >/dev/null 2>&1; then
+ # shellcheck disable=SC2164
+ pushd "${KUBE_ROOT}/hack/tools" >/dev/null
+ GO111MODULE=on go install ./ncpu || echo "Will not automatically set GOMAXPROCS"
+ # shellcheck disable=SC2164
+ popd >/dev/null
+ fi
+ if command -v ncpu >/dev/null 2>&1; then
+ GOMAXPROCS=$(ncpu)
+ export GOMAXPROCS
+ kube::log::status "Set GOMAXPROCS automatically to ${GOMAXPROCS}"
+ fi
fi
-
- GOMAXPROCS=${GOMAXPROCS:-$(ncpu)}
- export GOMAXPROCS
- kube::log::status "Setting GOMAXPROCS: ${GOMAXPROCS}"
}
# This will take binaries from $GOPATH/bin and copy them to the appropriate
diff --git a/hack/make-rules/test-e2e-node.sh b/hack/make-rules/test-e2e-node.sh
index 43dde0c740f..49e3e04ac71 100755
--- a/hack/make-rules/test-e2e-node.sh
+++ b/hack/make-rules/test-e2e-node.sh
@@ -18,6 +18,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
+kube::golang::setup_gomaxprocs
# start the cache mutation detector by default so that cache mutators will be found
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
diff --git a/hack/make-rules/test.sh b/hack/make-rules/test.sh
index e9074678a8f..4aa72730d83 100755
--- a/hack/make-rules/test.sh
+++ b/hack/make-rules/test.sh
@@ -22,6 +22,7 @@ KUBE_ROOT=$(dirname "${BASH_SOURCE[0]}")/../..
source "${KUBE_ROOT}/hack/lib/init.sh"
kube::golang::setup_env
+kube::golang::setup_gomaxprocs
# start the cache mutation detector by default so that cache mutators will be found
KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
|
34
|
Merge pull request #119709 from charles-chenzz/fix_flaky
fix flaky test on dra TestPrepareResources/should_timeout
|
2023-08-18
| null | null |
35
|
Merge pull request #119819 from pohly/dra-performance-test-driver
dra test: enhance performance of test driver controller
|
2023-08-18
| null | null |
36
|
Merge pull request #119938 from my-git9/certlist-ut
kubeadm: increase ut for certs/certlist
|
2023-08-18
| null | null |
37
|
Merge pull request #119859 from SataQiu/fix-healthcheck-client-cert-20230809
kubeadm: remove 'system:masters' organization from etcd/healthcheck-client certificate
|
2023-08-18
| null | null |
38
|
e2e framework util subtle bug checking endpoints
Change-Id: Ied14efcb75a45e3bbd5f76d4ee4c89703161df54
|
2023-08-18
| null |
index 407c9e60d79..f10e3254c01 100644
--- a/test/e2e/framework/util.go
+++ b/test/e2e/framework/util.go
@@ -478,7 +478,7 @@ func isIPv6Endpoint(e *v1.Endpoints) bool {
continue
}
// Endpoints are single family, so it is enough to check only one address
- return netutils.IsIPv6String(sub.Addresses[0].IP)
+ return netutils.IsIPv6String(addr.IP)
}
}
// default to IPv4 for an Endpoint without IP addresses
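The subtle bug: the loop iterates over addresses as addr, but the old code always classified sub.Addresses[0], which may not be the address that passed the filter; the fix classifies addr itself. IsIPv6String comes from k8s.io/utils/net; a small sketch of its behavior:

package main

import (
	"fmt"

	netutils "k8s.io/utils/net"
)

// Endpoints are single-family, so classifying one valid address
// classifies the whole object.
func main() {
	fmt.Println(netutils.IsIPv6String("2001:db8::1")) // true
	fmt.Println(netutils.IsIPv6String("10.0.0.1"))    // false
	fmt.Println(netutils.IsIPv6String("not-an-ip"))   // false
}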
|
39
|
Merge pull request #119928 from aojea/fixe2e
e2e WaitForServiceEndpointsNum take into account the address family
|
2023-08-18
| null | null |
41
|
Merge pull request #119914 from luohaha3123/job-feature
Job: Change job controller method receivers to pointer
|
2023-08-18
| null | null |
42
|
Merge pull request #119907 from Hii-Arpit/Hii-Arpit-Fixing-Broken-Link
Fixing the "Service Account Token" link in the readme
|
2023-08-18
| null | null |
43
|
Merge pull request #119904 from tenzen-y/replace-deprecated-workqueue-lib
Job: Replace deprecated workqueue function with supported one
|
2023-08-18
| null | null |
44
|
Merge pull request #119890 from tzneal/containers-lifecycle-flake
crio: increase test buffer to eliminate test flakes
|
2023-08-18
| null | null |
45
|
Merge pull request #119844 from enj/enj/i/upgrade_regex
wsstream: use a single approach to detect connection upgrade
|
2023-08-18
| null | null |
46
|
Merge pull request #119825 from Jefftree/add-gv
Move adding GroupVersion log until after an update is confirmed
|
2023-08-18
| null | null |
47
|
Merge pull request #119796 from sttts/sttts-caches-populated
client-go: log proper 'caches populated' message, with type and source and only once
|
2023-08-18
| null | null |
48
|
Merge pull request #119795 from sttts/sttts-httplog-impersonation
apiserver/httplog: pretty up impersonation output
|
2023-08-18
| null | null |
49
|
Merge pull request #119794 from aojea/jsonlogkube
implement Stringer for podActions
|
2023-08-18
| null | null |
60
|
kubeadm: restore and generalize the TestMigrateOldConfig test
The test required two APIs to be available to test for migration.
Keep it simple and use a variable "gv" at the top of the function body
to easily swap the version to be tested once an old API is deleted.
e.g. currently v1beta3 is the "old" API, v1beta4 is the "new" one.
Ultimately, this test only makes sure that the expected kinds are
available post migration.
|
2023-08-18
| null |
index c713984b22a..879d04aef7f 100644
--- a/cmd/kubeadm/app/util/config/common_test.go
+++ b/cmd/kubeadm/app/util/config/common_test.go
@@ -21,13 +21,16 @@ import (
"reflect"
"testing"
+ "github.com/lithammer/dedent"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/version"
apimachineryversion "k8s.io/apimachinery/pkg/version"
kubeadmapi "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm"
- kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
+ kubeadmapiv1old "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta3"
+ kubeadmapiv1 "k8s.io/kubernetes/cmd/kubeadm/app/apis/kubeadm/v1beta4"
"k8s.io/kubernetes/cmd/kubeadm/app/constants"
+ kubeadmutil "k8s.io/kubernetes/cmd/kubeadm/app/util"
)
const KubeadmGroupName = "kubeadm.k8s.io"
@@ -213,209 +216,228 @@ func TestVerifyAPIServerBindAddress(t *testing.T) {
}
}
-// TODO: re-include TestMigrateOldConfigFromFile once a new API version is added after v1beta3.
-// see <link-to-commit-foo> of how this unit test function
-// looked before it was removed with the removal of v1beta2.
-// func TestMigrateOldConfigFromFile(t *testing.T) {
-// tests := []struct {
-// desc string
-// oldCfg string
-// expectedKinds []string
-// expectErr bool
-// }{
-// {
-// desc: "empty file produces empty result",
-// oldCfg: "",
-// expectErr: false,
-// },
-// {
-// desc: "bad config produces error",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectErr: true,
-// },
-// {
-// desc: "InitConfiguration only gets migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: InitConfiguration
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "ClusterConfiguration only gets migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: ClusterConfiguration
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "JoinConfiguration only gets migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: JoinConfiguration
-// discovery:
-// bootstrapToken:
-// token: abcdef.0123456789abcdef
-// apiServerEndpoint: kube-apiserver:6443
-// unsafeSkipCAVerification: true
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.JoinConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "Init + Cluster Configurations are migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: InitConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: ClusterConfiguration
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "Init + Join Configurations are migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: InitConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: JoinConfiguration
-// discovery:
-// bootstrapToken:
-// token: abcdef.0123456789abcdef
-// apiServerEndpoint: kube-apiserver:6443
-// unsafeSkipCAVerification: true
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// constants.JoinConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "Cluster + Join Configurations are migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: ClusterConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: JoinConfiguration
-// discovery:
-// bootstrapToken:
-// token: abcdef.0123456789abcdef
-// apiServerEndpoint: kube-apiserver:6443
-// unsafeSkipCAVerification: true
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// constants.JoinConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "Init + Cluster + Join Configurations are migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: InitConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: ClusterConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: JoinConfiguration
-// discovery:
-// bootstrapToken:
-// token: abcdef.0123456789abcdef
-// apiServerEndpoint: kube-apiserver:6443
-// unsafeSkipCAVerification: true
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// constants.JoinConfigurationKind,
-// },
-// expectErr: false,
-// },
-// {
-// desc: "component configs are not migrated",
-// oldCfg: dedent.Dedent(fmt.Sprintf(`
-// apiVersion: %s
-// kind: InitConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: ClusterConfiguration
-// ---
-// apiVersion: %[1]s
-// kind: JoinConfiguration
-// discovery:
-// bootstrapToken:
-// token: abcdef.0123456789abcdef
-// apiServerEndpoint: kube-apiserver:6443
-// unsafeSkipCAVerification: true
-// ---
-// apiVersion: kubeproxy.config.k8s.io/v1alpha1
-// kind: KubeProxyConfiguration
-// ---
-// apiVersion: kubelet.config.k8s.io/v1beta1
-// kind: KubeletConfiguration
-// `, kubeadmapiv1old.SchemeGroupVersion.String())),
-// expectedKinds: []string{
-// constants.InitConfigurationKind,
-// constants.ClusterConfigurationKind,
-// constants.JoinConfigurationKind,
-// },
-// expectErr: false,
-// },
-// }
+// NOTE: do not delete this test once an older API is removed and there is only one API left.
+// Update the inline "gv" and "gvExperimental" variables to hold the GroupVersion string of
+// the API to be tested. If there are no experimental APIs, make "gvExperimental" point to
+// a non-experimental API.
+func TestMigrateOldConfig(t *testing.T) {
+ var (
+ gv = kubeadmapiv1old.SchemeGroupVersion.String()
+ gvExperimental = kubeadmapiv1.SchemeGroupVersion.String()
+ )
+ tests := []struct {
+ desc string
+ oldCfg string
+ expectedKinds []string
+ expectErr bool
+ allowExperimental bool
+ }{
+ {
+ desc: "empty file produces empty result",
+ oldCfg: "",
+ expectErr: false,
+ },
+ {
+ desc: "bad config produces error",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ `, gv)),
+ expectErr: true,
+ },
+ {
+ desc: "InitConfiguration only gets migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: InitConfiguration
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "ClusterConfiguration only gets migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: ClusterConfiguration
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "JoinConfiguration only gets migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: JoinConfiguration
+ discovery:
+ bootstrapToken:
+ token: abcdef.0123456789abcdef
+ apiServerEndpoint: kube-apiserver:6443
+ unsafeSkipCAVerification: true
+ `, gv)),
+ expectedKinds: []string{
+ constants.JoinConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "Init + Cluster Configurations are migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: InitConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: ClusterConfiguration
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "Init + Join Configurations are migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: InitConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: JoinConfiguration
+ discovery:
+ bootstrapToken:
+ token: abcdef.0123456789abcdef
+ apiServerEndpoint: kube-apiserver:6443
+ unsafeSkipCAVerification: true
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ constants.JoinConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "Cluster + Join Configurations are migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: ClusterConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: JoinConfiguration
+ discovery:
+ bootstrapToken:
+ token: abcdef.0123456789abcdef
+ apiServerEndpoint: kube-apiserver:6443
+ unsafeSkipCAVerification: true
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ constants.JoinConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "Init + Cluster + Join Configurations are migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: InitConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: ClusterConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: JoinConfiguration
+ discovery:
+ bootstrapToken:
+ token: abcdef.0123456789abcdef
+ apiServerEndpoint: kube-apiserver:6443
+ unsafeSkipCAVerification: true
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ constants.JoinConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "component configs are not migrated",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: InitConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: ClusterConfiguration
+ ---
+ apiVersion: %[1]s
+ kind: JoinConfiguration
+ discovery:
+ bootstrapToken:
+ token: abcdef.0123456789abcdef
+ apiServerEndpoint: kube-apiserver:6443
+ unsafeSkipCAVerification: true
+ ---
+ apiVersion: kubeproxy.config.k8s.io/v1alpha1
+ kind: KubeProxyConfiguration
+ ---
+ apiVersion: kubelet.config.k8s.io/v1beta1
+ kind: KubeletConfiguration
+ `, gv)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ constants.JoinConfigurationKind,
+ },
+ expectErr: false,
+ },
+ {
+ desc: "ClusterConfiguration gets migrated from experimental API",
+ oldCfg: dedent.Dedent(fmt.Sprintf(`
+ apiVersion: %s
+ kind: ClusterConfiguration
+ `, gvExperimental)),
+ expectedKinds: []string{
+ constants.InitConfigurationKind,
+ constants.ClusterConfigurationKind,
+ },
+ allowExperimental: true,
+ expectErr: false,
+ },
+ }
-// for _, test := range tests {
-// t.Run(test.desc, func(t *testing.T) {
-// b, err := MigrateOldConfig([]byte(test.oldCfg))
-// if test.expectErr {
-// if err == nil {
-// t.Fatalf("unexpected success:\\n%s", b)
-// }
-// } else {
-// if err != nil {
-// t.Fatalf("unexpected failure: %v", err)
-// }
-// gvks, err := kubeadmutil.GroupVersionKindsFromBytes(b)
-// if err != nil {
-// t.Fatalf("unexpected error returned by GroupVersionKindsFromBytes: %v", err)
-// }
-// if len(gvks) != len(test.expectedKinds) {
-// t.Fatalf("length mismatch between resulting gvks and expected kinds:\\n\\tlen(gvks)=%d\\n\\tlen(expectedKinds)=%d",
-// len(gvks), len(test.expectedKinds))
-// }
-// for _, expectedKind := range test.expectedKinds {
-// if !kubeadmutil.GroupVersionKindsHasKind(gvks, expectedKind) {
-// t.Fatalf("migration failed to produce config kind: %s", expectedKind)
-// }
-// }
-// }
-// })
-// }
-// }
+ for _, test := range tests {
+ t.Run(test.desc, func(t *testing.T) {
+ b, err := MigrateOldConfig([]byte(test.oldCfg), test.allowExperimental)
+ if test.expectErr {
+ if err == nil {
+ t.Fatalf("unexpected success:\\n%s", b)
+ }
+ } else {
+ if err != nil {
+ t.Fatalf("unexpected failure: %v", err)
+ }
+ gvks, err := kubeadmutil.GroupVersionKindsFromBytes(b)
+ if err != nil {
+ t.Fatalf("unexpected error returned by GroupVersionKindsFromBytes: %v", err)
+ }
+ if len(gvks) != len(test.expectedKinds) {
+ t.Fatalf("length mismatch between resulting gvks and expected kinds:\\n\\tlen(gvks)=%d\\n\\tlen(expectedKinds)=%d",
+ len(gvks), len(test.expectedKinds))
+ }
+ for _, expectedKind := range test.expectedKinds {
+ if !kubeadmutil.GroupVersionKindsHasKind(gvks, expectedKind) {
+ t.Fatalf("migration failed to produce config kind: %s", expectedKind)
+ }
+ }
+ }
+ })
+ }
+}
func TestIsKubeadmPrereleaseVersion(t *testing.T) {
validVersionInfo := &apimachineryversion.Info{Major: "1", GitVersion: "v1.23.0-alpha.1"}
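The YAML fixtures above rely on fmt's explicit argument indexes: %[1]s refers back to the first Sprintf argument, so a single group/version string is stamped into every document of a multi-document YAML. A minimal sketch:

package main

import "fmt"

func main() {
	// %s consumes the first argument; %[1]s refers back to it, so both
	// YAML documents get the same apiVersion.
	gv := "kubeadm.k8s.io/v1beta3"
	fmt.Printf("apiVersion: %s\nkind: InitConfiguration\n---\napiVersion: %[1]s\nkind: ClusterConfiguration\n", gv)
}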
|
65
|
kubectl explain should work for both cluster-scoped and namespaced resources, and for resources without a GET method
|
2023-08-18
| null |
index f67a9f4ca36..4c5e1c62be5 100644
--- a/staging/src/k8s.io/kubectl/pkg/explain/v2/funcs.go
+++ b/staging/src/k8s.io/kubectl/pkg/explain/v2/funcs.go
@@ -185,6 +185,9 @@ func WithBuiltinTemplateFuncs(tmpl *template.Template) *template.Template {
return copyDict, nil
},
+ "list": func(values ...any) ([]any, error) {
+ return values, nil
+ },
"add": func(value, operand int) int {
return value + operand
},
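The new template helper is an ordinary variadic Go function exposed through text/template's FuncMap; a self-contained sketch of the same pattern (demo names are illustrative, not from the kubectl code):

package main

import (
	"os"
	"text/template"
)

// Registers a "list" helper so templates can build ad-hoc slices,
// mirroring the function added to the explain template funcs above.
func main() {
	funcs := template.FuncMap{
		"list": func(values ...any) ([]any, error) { return values, nil },
	}
	tmpl := template.Must(template.New("demo").Funcs(funcs).
		Parse(`{{range list "get" "list" "watch"}}{{.}} {{end}}`))
	_ = tmpl.Execute(os.Stdout, nil) // prints: get list watch
}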
diff --git a/staging/src/k8s.io/kubectl/pkg/explain/v2/templates/batch.k8s.io_v1.json b/staging/src/k8s.io/kubectl/pkg/explain/v2/templates/batch.k8s.io_v1.json
new file mode 100644
index 00000000000..a6b9b2eff50
--- /dev/null
+++ b/staging/src/k8s.io/kubectl/pkg/explain/v2/templates/batch.k8s.io_v1.json
@@ -0,0 +1,9106 @@
+{
+ "openapi": "3.0.0",
+ "info": {
+ "title": "Kubernetes",
+ "version": "v1.27.1"
+ },
+ "paths": {
+ "/apis/batch/v1/": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "get available resources",
+ "operationId": "getBatchV1APIResources",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ }
+ }
+ },
+ "/apis/batch/v1/cronjobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "list or watch objects of kind CronJob",
+ "operationId": "listBatchV1CronJobForAllNamespaces",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/jobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "list or watch objects of kind Job",
+ "operationId": "listBatchV1JobForAllNamespaces",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/namespaces/{namespace}/cronjobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "list or watch objects of kind CronJob",
+ "operationId": "listBatchV1NamespacedCronJob",
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "post": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "create a CronJob",
+ "operationId": "createBatchV1NamespacedCronJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "delete": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "delete collection of CronJob",
+ "operationId": "deleteBatchV1CollectionNamespacedCronJob",
+ "parameters": [
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "gracePeriodSeconds",
+ "in": "query",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "orphanDependents",
+ "in": "query",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \\"orphan\\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "propagationPolicy",
+ "in": "query",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "read the specified CronJob",
+ "operationId": "readBatchV1NamespacedCronJob",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "put": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "replace the specified CronJob",
+ "operationId": "replaceBatchV1NamespacedCronJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "delete": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "delete a CronJob",
+ "operationId": "deleteBatchV1NamespacedCronJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "gracePeriodSeconds",
+ "in": "query",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "orphanDependents",
+ "in": "query",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \\"orphan\\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "propagationPolicy",
+ "in": "query",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "patch": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "partially update the specified CronJob",
+ "operationId": "patchBatchV1NamespacedCronJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "force",
+ "in": "query",
+ "description": "Force is going to \\"force\\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "application/apply-patch+yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/json-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/strategic-merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "name",
+ "in": "path",
+ "description": "name of the CronJob",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}/status": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "read status of the specified CronJob",
+ "operationId": "readBatchV1NamespacedCronJobStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "put": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "replace status of the specified CronJob",
+ "operationId": "replaceBatchV1NamespacedCronJobStatus",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "patch": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "partially update status of the specified CronJob",
+ "operationId": "patchBatchV1NamespacedCronJobStatus",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "force",
+ "in": "query",
+ "description": "Force is going to \\"force\\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "application/apply-patch+yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/json-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/strategic-merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "name",
+ "in": "path",
+ "description": "name of the CronJob",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/namespaces/{namespace}/jobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "list or watch objects of kind Job",
+ "operationId": "listBatchV1NamespacedJob",
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "post": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "create a Job",
+ "operationId": "createBatchV1NamespacedJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "delete": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "delete collection of Job",
+ "operationId": "deleteBatchV1CollectionNamespacedJob",
+ "parameters": [
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "gracePeriodSeconds",
+ "in": "query",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "orphanDependents",
+ "in": "query",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \\"orphan\\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "propagationPolicy",
+ "in": "query",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/namespaces/{namespace}/jobs/{name}": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "read the specified Job",
+ "operationId": "readBatchV1NamespacedJob",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "put": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "replace the specified Job",
+ "operationId": "replaceBatchV1NamespacedJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
|
67
|
Paginate within DeleteCollection call.
|
2023-08-18
| null |
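The pattern this patch introduces — list one page at a time with a limit, hand each page's items to a fixed worker pool, then resume from the continue token until it is empty — can be illustrated outside the apiserver. The sketch below is a minimal, self-contained rendition of that flow; the `page`, `lister`, and `deleteCollection` names are illustrative stand-ins, not the real `Store`/`storage.Interface` types used in the diff.

```go
package main

import (
	"context"
	"fmt"
	"sync"
)

// page models one page of list results plus the continue token for the
// next page. These types are stand-ins for the apiserver's storage types.
type page struct {
	items []string
	next  string // empty means no further pages
}

type lister func(ctx context.Context, limit int, token string) (page, error)

// deleteCollection drains a collection page by page, handing every item
// to a fixed pool of workers: list with a limit, process the page, then
// resume from the continue token, mirroring the paginated flow of the patch.
func deleteCollection(ctx context.Context, list lister, pageSize, workers int, del func(string) error) error {
	toProcess := make(chan string)
	errs := make(chan error, 1)
	var wg sync.WaitGroup
	for i := 0; i < workers; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for item := range toProcess {
				if err := del(item); err != nil {
					select {
					case errs <- err: // keep the first failure, keep draining
					default:
					}
				}
			}
		}()
	}

	token := ""
	for {
		p, err := list(ctx, pageSize, token)
		if err != nil {
			close(toProcess)
			wg.Wait()
			return err
		}
		for _, item := range p.items {
			toProcess <- item
		}
		if p.next == "" {
			break // last page reached
		}
		token = p.next // resume where the previous page stopped
	}
	close(toProcess)
	wg.Wait()

	select {
	case err := <-errs:
		return err
	default:
		return nil
	}
}

func main() {
	names := []string{"a", "b", "c", "d", "e"}
	// An in-memory lister: the continue token is just the next start index.
	list := func(_ context.Context, limit int, token string) (page, error) {
		start := 0
		if token != "" {
			fmt.Sscanf(token, "%d", &start)
		}
		end := start + limit
		if end > len(names) {
			end = len(names)
		}
		next := ""
		if end < len(names) {
			next = fmt.Sprintf("%d", end)
		}
		return page{items: names[start:end], next: next}, nil
	}
	if err := deleteCollection(context.Background(), list, 2, 3, func(name string) error {
		fmt.Println("deleting", name)
		return nil
	}); err != nil {
		fmt.Println("error:", err)
	}
}
```

Note the design choice shared with the real patch: workers never block the producer (they drain the channel even after a failure, recording only the first error), which is what lets the pagination loop keep feeding items safely.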
index 55f06f7972b..353e5532c1e 100644
--- a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go
+++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store.go
@@ -1143,6 +1143,11 @@ func (e *Store) DeleteReturnsDeletedObject() bool {
return e.ReturnDeletedObject
}
+// deleteCollectionPageSize is the size of the page used when
+// listing objects from storage during DeleteCollection calls.
+// It's a variable to allow overwriting in tests.
+var deleteCollectionPageSize = int64(10000)
+
// DeleteCollection removes all items returned by List with a given ListOptions from storage.
//
// DeleteCollection is currently NOT atomic. It can happen that only subset of objects
@@ -1155,27 +1160,11 @@ func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.Vali
listOptions = listOptions.DeepCopy()
}
- listObj, err := e.List(ctx, listOptions)
- if err != nil {
- return nil, err
- }
- items, err := meta.ExtractList(listObj)
- if err != nil {
- return nil, err
- }
- if len(items) == 0 {
- // Nothing to delete, return now
- return listObj, nil
- }
- // Spawn a number of goroutines, so that we can issue requests to storage
- // in parallel to speed up deletion.
- // It is proportional to the number of items to delete, up to
- // DeleteCollectionWorkers (it doesn't make much sense to spawn 16
- // workers to delete 10 items).
+ itemsLock := sync.RWMutex{}
+ var items []runtime.Object
+
+ // TODO(wojtek-t): Decide if we don't want to start workers more opportunistically.
workersNumber := e.DeleteCollectionWorkers
- if workersNumber > len(items) {
- workersNumber = len(items)
- }
if workersNumber < 1 {
workersNumber = 1
}
@@ -1194,7 +1183,9 @@ func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.Vali
defer wg.Done()
for index := range toProcess {
+ itemsLock.RLock()
accessor, err := meta.Accessor(items[index])
+ itemsLock.RUnlock()
if err != nil {
errs <- err
return
@@ -1220,20 +1211,86 @@ func (e *Store) DeleteCollection(ctx context.Context, deleteValidation rest.Vali
close(workersExited)
}()
- func() {
+ hasLimit := listOptions.Limit > 0
+ if listOptions.Limit == 0 {
+ listOptions.Limit = deleteCollectionPageSize
+ }
+
+ // Paginate the list request and throw all items into workers.
+ listObj, err := func() (runtime.Object, error) {
defer close(toProcess)
- for i := 0; i < len(items); i++ {
+ processedItems := 0
+ var originalList runtime.Object
+ for {
select {
- case toProcess <- i:
- case <-workersExited:
- klog.V(4).InfoS("workers already exited, and there are some items waiting to be processed", "finished", i, "total", len(items))
- return
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+
+ listObj, err := e.List(ctx, listOptions)
+ if err != nil {
+ return nil, err
+ }
+
+ newItems, err := meta.ExtractList(listObj)
+ if err != nil {
+ return nil, err
+ }
+ itemsLock.Lock()
+ items = append(items, newItems...)
+ itemsLock.Unlock()
+
+ for i := 0; i < len(newItems); i++ {
+ select {
+ case toProcess <- processedItems + i:
+ case <-workersExited:
+ klog.V(4).InfoS("workers already exited, and there are some items waiting to be processed", "queued/finished", i, "total", processedItems+len(newItems))
+ // Try to propagate an error from the workers if possible.
+ select {
+ case err := <-errs:
+ return nil, err
+ default:
+ return nil, fmt.Errorf("all DeleteCollection workers exited")
+ }
+ }
+ }
+ processedItems += len(newItems)
+
+ // If the original request was setting the limit, finish after running it.
+ if hasLimit {
+ return listObj, nil
+ }
+
+ if originalList == nil {
+ originalList = listObj
+ meta.SetList(originalList, nil)
}
+
+ // If there are no more items, return the list.
+ m, err := meta.ListAccessor(listObj)
+ if err != nil {
+ return nil, err
+ }
+ if len(m.GetContinue()) == 0 {
+ itemsLock.Lock()
+ meta.SetList(originalList, items)
+ itemsLock.Unlock()
+ return originalList, nil
+ }
+
+ // Set up the next loop.
+ listOptions.Continue = m.GetContinue()
+ listOptions.ResourceVersion = ""
+ listOptions.ResourceVersionMatch = ""
}
}()
+ if err != nil {
+ return nil, err
+ }
- // Wait for all workers to exist.
+ // Wait for all workers to exit.
<-workersExited
select {
diff --git a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go
index 390f794065e..c05940a6f2d 100644
--- a/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go
+++ b/staging/src/k8s.io/apiserver/pkg/registry/generic/registry/store_test.go
@@ -25,6 +25,7 @@ import (
"strconv"
"strings"
"sync"
+ "sync/atomic"
"testing"
"time"
@@ -2020,19 +2021,34 @@ func TestStoreDeletionPropagation(t *testing.T) {
}
}
-func TestStoreDeleteCollection(t *testing.T) {
- podA := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "foo"}}
- podB := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: "bar"}}
+type storageWithCounter struct {
+ storage.Interface
+
+ listCounter int64
+}
+func (s *storageWithCounter) GetList(ctx context.Context, key string, opts storage.ListOptions, listObj runtime.Object) error {
+ atomic.AddInt64(&s.listCounter, 1)
+ return s.Interface.GetList(ctx, key, opts, listObj)
+}
+
+func TestStoreDeleteCollection(t *testing.T) {
testContext := genericapirequest.WithNamespace(genericapirequest.NewContext(), "test")
destroyFunc, registry := NewTestGenericStoreRegistry(t)
defer destroyFunc()
- if _, err := registry.Create(testContext, podA, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}); err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- if _, err := registry.Create(testContext, podB, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}); err != nil {
- t.Errorf("Unexpected error: %v", err)
+ // Overwrite the underlying storage interface so that it counts GetList calls
+ // and reduce the default page size to 2.
+ storeWithCounter := &storageWithCounter{Interface: registry.Storage.Storage}
+ registry.Storage.Storage = storeWithCounter
+ deleteCollectionPageSize = 2
+
+ numPods := 10
+ for i := 0; i < numPods; i++ {
+ pod := &example.Pod{ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("foo-%d", i)}}
+ if _, err := registry.Create(testContext, pod, rest.ValidateAllObjectFunc, &metav1.CreateOptions{}); err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
}
// Delete all pods.
@@ -2041,15 +2057,18 @@ func TestStoreDeleteCollection(t *testing.T) {
t.Fatalf("Unexpected error: %v", err)
}
deletedPods := deleted.(*example.PodList)
- if len(deletedPods.Items) != 2 {
- t.Errorf("Unexpected number of pods deleted: %d, expected: 3", len(deletedPods.Items))
+ if len(deletedPods.Items) != numPods {
+ t.Errorf("Unexpected number of pods deleted: %d, expected: %d", len(deletedPods.Items), numPods)
}
-
- if _, err := registry.Get(testContext, podA.Name, &metav1.GetOptions{}); !errors.IsNotFound(err) {
- t.Errorf("Unexpected error: %v", err)
+ expectedCalls := (int64(numPods) + deleteCollectionPageSize - 1) / deleteCollectionPageSize
+ if listCalls := atomic.LoadInt64(&storeWithCounter.listCounter); listCalls != expectedCalls {
+ t.Errorf("Unexpected number of list calls: %d, expected: %d", listCalls, expectedCalls)
}
- if _, err := registry.Get(testContext, podB.Name, &metav1.GetOptions{}); !errors.IsNotFound(err) {
- t.Errorf("Unexpected error: %v", err)
+
+ for i := 0; i < numPods; i++ {
+ if _, err := registry.Get(testContext, fmt.Sprintf("foo-%d", i), &metav1.GetOptions{}); !errors.IsNotFound(err) {
+ t.Errorf("Unexpected error: %v", err)
+ }
}
}
|
70
|
scheduler_perf: add TestScheduling integration test
This runs workloads that are labeled "integration-test". The apiserver and
scheduler are started only once per unique configuration; each workload that
uses that configuration then runs against them. This makes execution faster. In contrast to
benchmarking, we care less about starting with a clean slate for each test.
|
2023-08-18
| null |
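The selection mechanism in this patch is a small label filter: in the diff below, a workload runs when `enabled(*testSchedulingLabelFilter, append(tc.Labels, w.Labels...)...)` is true. The sketch here implements the semantics the flag's help text describes — bare or `+`-prefixed terms are required labels, `-`-prefixed terms are forbidden — as an assumption; the upstream `enabled` helper is not shown in this hunk and may differ in detail.

```go
package main

import (
	"fmt"
	"strings"
)

// enabled reports whether a test case carrying the given labels matches a
// comma-separated filter: a bare name or "+name" requires the label, and
// "-name" forbids it. Sketch of the flag's documented semantics, not the
// upstream scheduler_perf helper.
func enabled(filter string, labels ...string) bool {
	have := map[string]bool{}
	for _, l := range labels {
		have[l] = true
	}
	for _, term := range strings.Split(filter, ",") {
		term = strings.TrimSpace(term)
		if term == "" {
			continue
		}
		switch term[0] {
		case '-':
			if have[term[1:]] {
				return false // forbidden label present
			}
		case '+':
			if !have[term[1:]] {
				return false // required label missing
			}
		default:
			if !have[term] {
				return false // bare terms are also required
			}
		}
	}
	return true
}

func main() {
	fmt.Println(enabled("integration-test", "integration-test", "fast")) // true
	fmt.Println(enabled("integration-test,-performance", "performance")) // false
}
```

With this shape, the default filter `integration-test` picks exactly the workloads the performance-config.yaml labels for pull-kubernetes-integration, while `-test-scheduling-label-filter` lets a developer widen or narrow the set without editing the config.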
index 91cf677e4ad..618e48a3be4 100644
--- a/test/integration/scheduler_perf/README.md
+++ b/test/integration/scheduler_perf/README.md
@@ -100,3 +100,15 @@ performance.
During interactive debugging sessions it is possible to enable per-test output
via -use-testing-log.
+
+## Integration tests
+
+To run integration tests, use:
+```
+make test-integration WHAT=./test/integration/scheduler_perf KUBE_TEST_ARGS=-use-testing-log
+```
+
+Integration testing uses the same `config/performance-config.yaml` as
+benchmarking. By default, workloads labeled as `integration-test` are executed
+as part of integration testing. `-test-scheduling-label-filter` can be used to
+change that.
diff --git a/test/integration/scheduler_perf/config/performance-config.yaml b/test/integration/scheduler_perf/config/performance-config.yaml
index 1a5e82f6826..6d2d0e4ac2e 100644
--- a/test/integration/scheduler_perf/config/performance-config.yaml
+++ b/test/integration/scheduler_perf/config/performance-config.yaml
@@ -1,3 +1,17 @@
+# The following labels are used in this file:
+# - fast: short execution time, ideally less than 30 seconds
+# - integration-test: used to select workloads that
+# run in pull-kubernetes-integration. Choosing those tests
+# is a tradeoff between code coverage and overall runtime.
+# - performance: used to select workloads that run
+# in ci-benchmark-scheduler-perf. Such workloads
+# must run long enough (ideally, longer than 10 seconds)
+# to provide meaningful samples for the pod scheduling
+# rate.
+#
+# Combining "performance" and "fast" selects suitable workloads for a local
+# before/after comparisons with benchstat.
+
- name: SchedulingBasic
defaultPodTemplatePath: config/pod-default.yaml
workloadTemplate:
@@ -10,7 +24,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
- labels: [fast]
+ labels: [integration-test, fast]
params:
initNodes: 500
initPods: 500
@@ -39,7 +53,7 @@
namespace: sched-1
workloads:
- name: 500Nodes
- labels: [fast]
+ labels: [integration-test, fast]
params:
initNodes: 500
initPods: 100
@@ -161,7 +175,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
- labels: [fast]
+ labels: [integration-test, fast]
params:
initNodes: 500
initPods: 500
@@ -223,7 +237,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
- labels: [fast]
+ labels: [integration-test, fast]
params:
initNodes: 500
initPods: 500
@@ -308,7 +322,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
- labels: [fast]
+ labels: [integration-test, fast]
params:
initNodes: 500
initPods: 1000
@@ -386,7 +400,7 @@
collectMetrics: true
workloads:
- name: 500Nodes
- labels: [fast]
+ labels: [integration-test, fast]
params:
initNodes: 500
initPods: 200
@@ -504,7 +518,7 @@
collectMetrics: true
workloads:
- name: 1000Nodes
- labels: [fast]
+ labels: [integration-test, fast]
params:
initNodes: 1000
measurePods: 1000
@@ -734,6 +748,7 @@
collectMetrics: true
workloads:
- name: fast
+ labels: [integration-test, fast]
params:
# This testcase runs through all code paths without
# taking too long overall.
@@ -743,7 +758,7 @@
measurePods: 10
maxClaimsPerNode: 10
- name: 2000pods_100nodes
- labels: [performance,fast]
+ labels: [performance, fast]
params:
# In this testcase, the number of nodes is smaller
# than the limit for the PodScheduling slices.
diff --git a/test/integration/scheduler_perf/scheduler_perf_test.go b/test/integration/scheduler_perf/scheduler_perf_test.go
index bd02232eeee..619d46bc39a 100644
--- a/test/integration/scheduler_perf/scheduler_perf_test.go
+++ b/test/integration/scheduler_perf/scheduler_perf_test.go
@@ -30,6 +30,7 @@ import (
"testing"
"time"
+ "github.com/google/go-cmp/cmp"
"github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
@@ -43,6 +44,7 @@ import (
utilfeature "k8s.io/apiserver/pkg/util/feature"
cacheddiscovery "k8s.io/client-go/discovery/cached"
"k8s.io/client-go/dynamic"
+ "k8s.io/client-go/informers"
coreinformers "k8s.io/client-go/informers/core/v1"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/client-go/restmapper"
@@ -128,7 +130,7 @@ type testCase struct {
Workloads []*workload
// SchedulerConfigPath is the path of scheduler configuration
// Optional
- SchedulerConfigPath *string
+ SchedulerConfigPath string
// Default path to spec file describing the pods to create.
// This path can be overridden in createPodsOp by setting PodTemplatePath.
// Optional
@@ -640,7 +642,7 @@ func initTestOutput(tb testing.TB) io.Writer {
return output
}
-var perfSchedulingLabelFilter = flag.String("perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-)")
+var perfSchedulingLabelFilter = flag.String("perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by BenchmarkPerfScheduling")
func BenchmarkPerfScheduling(b *testing.B) {
testCases, err := getTestCases(configFile)
@@ -699,7 +701,8 @@ func BenchmarkPerfScheduling(b *testing.B) {
for feature, flag := range tc.FeatureGates {
defer featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, feature, flag)()
}
- results := runWorkload(ctx, b, tc, w, false)
+ informerFactory, client, dyncClient := setupClusterForWorkload(ctx, b, tc.SchedulerConfigPath, tc.FeatureGates)
+ results := runWorkload(ctx, b, tc, w, informerFactory, client, dyncClient, false)
dataItems.DataItems = append(dataItems.DataItems, results...)
if len(results) > 0 {
@@ -737,6 +740,95 @@ func BenchmarkPerfScheduling(b *testing.B) {
}
}
+var testSchedulingLabelFilter = flag.String("test-scheduling-label-filter", "integration-test", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by TestScheduling")
+
+func TestScheduling(t *testing.T) {
+ testCases, err := getTestCases(configFile)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err = validateTestCases(testCases); err != nil {
+ t.Fatal(err)
+ }
+
+ // Check for leaks at the very end.
+ framework.GoleakCheck(t)
+
+ // All integration test cases share the same etcd, similar to
+ // https://github.com/kubernetes/kubernetes/blob/18d05b646d09b2971dc5400bc288062b0414e8cf/test/integration/framework/etcd.go#L186-L222.
+ framework.StartEtcd(t, nil)
+
+ // Workloads with the same configuration share the same apiserver. For that
+ // we first need to determine what those different configs are.
+ var configs []schedulerConfig
+ for _, tc := range testCases {
+ tcEnabled := false
+ for _, w := range tc.Workloads {
+ if enabled(*testSchedulingLabelFilter, append(tc.Labels, w.Labels...)...) {
+ tcEnabled = true
+ break
+ }
+ }
+ if !tcEnabled {
+ continue
+ }
+ exists := false
+ for _, config := range configs {
+ if config.equals(tc) {
+ exists = true
+ break
+ }
+ }
+ if !exists {
+ configs = append(configs, schedulerConfig{schedulerConfigPath: tc.SchedulerConfigPath, featureGates: tc.FeatureGates})
+ }
+ }
+ for _, config := range configs {
+ // Not a subtest because we don't have a good name for it.
+ func() {
+ _, ctx := ktesting.NewTestContext(t)
+ // No timeout here because `go test -timeout` will ensure that
+ // the test doesn't get stuck forever.
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+
+ for feature, flag := range config.featureGates {
+ defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, feature, flag)()
+ }
+ informerFactory, client, dynClient := setupClusterForWorkload(ctx, t, config.schedulerConfigPath, config.featureGates)
+
+ for _, tc := range testCases {
+ if !config.equals(tc) {
+ // Runs with some other config.
+ continue
+ }
+
+ t.Run(tc.Name, func(t *testing.T) {
+ for _, w := range tc.Workloads {
+ t.Run(w.Name, func(t *testing.T) {
+ if !enabled(*testSchedulingLabelFilter, append(tc.Labels, w.Labels...)...) {
+ t.Skipf("disabled by label filter %q", *testSchedulingLabelFilter)
+ }
+ _, ctx := ktesting.NewTestContext(t)
+ runWorkload(ctx, t, tc, w, informerFactory, client, dynClient, true)
+ })
+ }
+ })
+ }
+ }()
+ }
+}
+
+type schedulerConfig struct {
+ schedulerConfigPath string
+ featureGates map[featuregate.Feature]bool
+}
+
+func (c schedulerConfig) equals(tc *testCase) bool {
+ return c.schedulerConfigPath == tc.SchedulerConfigPath &&
+ cmp.Equal(c.featureGates, tc.FeatureGates)
+}
+
func loadSchedulerConfig(file string) (*config.KubeSchedulerConfiguration, error) {
data, err := os.ReadFile(file)
if err != nil {
@@ -753,16 +845,16 @@ func loadSchedulerConfig(file string) (*config.KubeSchedulerConfiguration, error
return nil, fmt.Errorf("couldn't decode as KubeSchedulerConfiguration, got %s", gvk)
}
-func unrollWorkloadTemplate(b *testing.B, wt []op, w *workload) []op {
+func unrollWorkloadTemplate(tb testing.TB, wt []op, w *workload) []op {
var unrolled []op
for opIndex, o := range wt {
realOp, err := o.realOp.patchParams(w)
if err != nil {
- b.Fatalf("op %d: %v", opIndex, err)
+ tb.Fatalf("op %d: %v", opIndex, err)
}
switch concreteOp := realOp.(type) {
case *createPodSetsOp:
- b.Logf("Creating %d pod sets %s", concreteOp.Count, concreteOp.CountParam)
+ tb.Logf("Creating %d pod sets %s", concreteOp.Count, concreteOp.CountParam)
for i := 0; i < concreteOp.Count; i++ {
copy := concreteOp.CreatePodsOp
ns := fmt.Sprintf("%s-%d", concreteOp.NamespacePrefix, i)
@@ -776,28 +868,43 @@ func unrollWorkloadTemplate(b *testing.B, wt []op, w *workload) []op {
return unrolled
}
-func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, cleanup bool) []DataItem {
- start := time.Now()
- b.Cleanup(func() {
- duration := time.Now().Sub(start)
- // This includes startup and shutdown time and thus does not
- // reflect scheduling performance. It's useful to get a feeling
- // for how long each workload runs overall.
- b.ReportMetric(duration.Seconds(), "runtime_seconds")
- })
-
+func setupClusterForWorkload(ctx context.Context, tb testing.TB, configPath string, featureGates map[featuregate.Feature]bool) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) {
var cfg *config.KubeSchedulerConfiguration
var err error
- if tc.SchedulerConfigPath != nil {
- cfg, err = loadSchedulerConfig(*tc.SchedulerConfigPath)
+ if configPath != "" {
+ cfg, err = loadSchedulerConfig(configPath)
if err != nil {
- b.Fatalf("error loading scheduler config file: %v", err)
+ tb.Fatalf("error loading scheduler config file: %v", err)
}
if err = validation.ValidateKubeSchedulerConfiguration(cfg); err != nil {
- b.Fatalf("validate scheduler config file failed: %v", err)
+ tb.Fatalf("validate scheduler config file failed: %v", err)
}
}
- informerFactory, client, dynClient := mustSetupCluster(ctx, b, cfg, tc.FeatureGates)
+ return mustSetupCluster(ctx, tb, cfg, featureGates)
+}
+
+func runWorkload(ctx context.Context, tb testing.TB, tc *testCase, w *workload, informerFactory informers.SharedInformerFactory, client clientset.Interface, dynClient dynamic.Interface, cleanup bool) []DataItem {
+ b, benchmarking := tb.(*testing.B)
+ if benchmarking {
+ start := time.Now()
+ b.Cleanup(func() {
+ duration := time.Since(start)
+ // This includes startup and shutdown time and thus does not
+ // reflect scheduling performance. It's useful to get a feeling
+ // for how long each workload runs overall.
+ b.ReportMetric(duration.Seconds(), "runtime_seconds")
+ })
+ }
+
+ // Disable error checking of the sampling interval length in the
+ // throughput collector by default. When running benchmarks, report
+ // it as a test failure when samples are not taken regularly.
+ var throughputErrorMargin float64
+ if benchmarking {
+ // TODO: To prevent perf-test failures, we increased the error margin. If it's
+ // still not enough one day, we should think of another approach to avoid this trick.
+ throughputErrorMargin = 30
+ }
// Additional informers needed for testing. The pod informer was
// already created before (scheduler.NewInformerFactory) and the
@@ -820,45 +927,45 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
if cleanup {
// This must run before controllers get shut down.
- defer cleanupWorkload(ctx, b, tc, client, numPodsScheduledPerNamespace)
+ defer cleanupWorkload(ctx, tb, tc, client, numPodsScheduledPerNamespace)
}
- for opIndex, op := range unrollWorkloadTemplate(b, tc.WorkloadTemplate, w) {
+ for opIndex, op := range unrollWorkloadTemplate(tb, tc.WorkloadTemplate, w) {
realOp, err := op.realOp.patchParams(w)
if err != nil {
- b.Fatalf("op %d: %v", opIndex, err)
+ tb.Fatalf("op %d: %v", opIndex, err)
}
select {
case <-ctx.Done():
- b.Fatalf("op %d: %v", opIndex, ctx.Err())
+ tb.Fatalf("op %d: %v", opIndex, ctx.Err())
default:
}
switch concreteOp := realOp.(type) {
case *createNodesOp:
nodePreparer, err := getNodePreparer(fmt.Sprintf("node-%d-", opIndex), concreteOp, client)
if err != nil {
- b.Fatalf("op %d: %v", opIndex, err)
+ tb.Fatalf("op %d: %v", opIndex, err)
}
if err := nodePreparer.PrepareNodes(ctx, nextNodeIndex); err != nil {
- b.Fatalf("op %d: %v", opIndex, err)
+ tb.Fatalf("op %d: %v", opIndex, err)
}
if cleanup {
- b.Cleanup(func() {
+ defer func() {
if err := nodePreparer.CleanupNodes(ctx); err != nil {
- b.Fatalf("failed to clean up nodes, error: %v", err)
+ tb.Fatalf("failed to clean up nodes, error: %v", err)
}
- })
+ }()
}
nextNodeIndex += concreteOp.Count
case *createNamespacesOp:
- nsPreparer, err := newNamespacePreparer(concreteOp, client, b)
+ nsPreparer, err := newNamespacePreparer(concreteOp, client, tb)
if err != nil {
- b.Fatalf("op %d: %v", opIndex, err)
+ tb.Fatalf("op %d: %v", opIndex, err)
}
if err := nsPreparer.prepare(ctx); err != nil {
nsPreparer.cleanup(ctx)
- b.Fatalf("op %d: %v", opIndex, err)
+ tb.Fatalf("op %d: %v", opIndex, err)
}
for _, n := range nsPreparer.namespaces() {
if _, ok := numPodsScheduledPerNamespace[n]; ok {
@@ -875,7 +982,7 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
if concreteOp.Namespace != nil {
namespace = *concreteOp.Namespace
}
- createNamespaceIfNotPresent(ctx, b, client, namespace, &numPodsScheduledPerNamespace)
+ createNamespaceIfNotPresent(ctx, tb, client, namespace, &numPodsScheduledPerNamespace)
if concreteOp.PodTemplatePath == nil {
concreteOp.PodTemplatePath = tc.DefaultPodTemplatePath
}
@@ -891,7 +998,7 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
if concreteOp.CollectMetrics {
collectorCtx, collectorCancel = context.WithCancel(ctx)
defer collectorCancel()
- collectors = getTestDataCollectors(b, podInformer, fmt.Sprintf("%s/%s", b.Name(), namespace), namespace, tc.MetricsCollectorConfig)
+ collectors = getTestDataCollectors(tb, podInformer, fmt.Sprintf("%s/%s", tb.Name(), namespace), namespace, tc.MetricsCollectorConfig, throughputErrorMargin)
for _, collector := range collectors {
// Need loop-local variable for function below.
collector := collector
@@ -902,8 +1009,8 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
}()
}
}
- if err := createPods(ctx, b, namespace, concreteOp, client); err != nil {
- b.Fatalf("op %d: %v", opIndex, err)
+ if err := createPods(ctx, tb, namespace, concreteOp, client); err != nil {
+ tb.Fatalf("op %d: %v", opIndex, err)
}
if concreteOp.SkipWaitToCompletion {
// Only record those namespaces that may potentially require barriers
@@ -914,8 +1021,8 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
numPodsScheduledPerNamespace[namespace] = concreteOp.Count
}
} else {
- if err := waitUntilPodsScheduledInNamespace(ctx, b, podInformer, namespace, concreteOp.Count); err != nil {
- b.Fatalf("op %d: error in waiting for pods to get scheduled: %v", opIndex, err)
+ if err := waitUntilPodsScheduledInNamespace(ctx, tb, podInformer, namespace, concreteOp.Count); err != nil {
+ tb.Fatalf("op %d: error in waiting for pods to get scheduled: %v", opIndex, err)
}
}
if concreteOp.CollectMetrics {
@@ -949,7 +1056,7 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
// Ensure the namespace exists.
nsObj := &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}
if _, err := client.CoreV1().Namespaces().Create(ctx, nsObj, metav1.CreateOptions{}); err != nil && !apierrors.IsAlreadyExists(err) {
- b.Fatalf("op %d: unable to create namespace %v: %v", opIndex, namespace, err)
+ tb.Fatalf("op %d: unable to create namespace %v: %v", opIndex, namespace, err)
}
var churnFns []func(name string) string
@@ -957,12 +1064,12 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
for i, path := range concreteOp.TemplatePaths {
unstructuredObj, gvk, err := getUnstructuredFromFile(path)
if err != nil {
- b.Fatalf("op %d: unable to parse the %v-th template path: %v", opIndex, i, err)
+ tb.Fatalf("op %d: unable to parse the %v-th template path: %v", opIndex, i, err)
}
// Obtain GVR.
mapping, err := restMapper.RESTMapping(gvk.GroupKind(), gvk.Version)
if err != nil {
- b.Fatalf("op %d: unable to find GVR for %v: %v", opIndex, gvk, err)
+ tb.Fatalf("op %d: unable to find GVR for %v: %v", opIndex, gvk, err)
}
gvr := mapping.Resource
// Distinguish cluster-scoped with namespaced API objects.
@@ -1043,11 +1150,11 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
case *barrierOp:
for _, namespace := range concreteOp.Namespaces {
if _, ok := numPodsScheduledPerNamespace[namespace]; !ok {
- b.Fatalf("op %d: unknown namespace %s", opIndex, namespace)
+ tb.Fatalf("op %d: unknown namespace %s", opIndex, namespace)
}
}
- if err := waitUntilPodsScheduled(ctx, b, podInformer, concreteOp.Namespaces, numPodsScheduledPerNamespace); err != nil {
- b.Fatalf("op %d: %v", opIndex, err)
+ if err := waitUntilPodsScheduled(ctx, tb, podInformer, concreteOp.Namespaces, numPodsScheduledPerNamespace); err != nil {
+ tb.Fatalf("op %d: %v", opIndex, err)
}
// At the end of the barrier, we can be sure that there are no pods
// pending scheduling in the namespaces that we just blocked on.
@@ -1067,19 +1174,19 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
default:
runable, ok := concreteOp.(runnableOp)
if !ok {
- b.Fatalf("op %d: invalid op %v", opIndex, concreteOp)
+ tb.Fatalf("op %d: invalid op %v", opIndex, concreteOp)
}
for _, namespace := range runable.requiredNamespaces() {
- createNamespaceIfNotPresent(ctx, b, client, namespace, &numPodsScheduledPerNamespace)
+ createNamespaceIfNotPresent(ctx, tb, client, namespace, &numPodsScheduledPerNamespace)
}
- runable.run(ctx, b, client)
+ runable.run(ctx, tb, client)
}
}
// check unused params and inform users
unusedParams := w.unusedParams()
if len(unusedParams) != 0 {
- b.Fatalf("the parameters %v are defined on workload %s, but unused.\\nPlease make sure there are no typos.", unusedParams, w.Name)
+ tb.Fatalf("the parameters %v are defined on workload %s, but unused.\\nPlease make sure there are no typos.", unusedParams, w.Name)
}
// Some tests have unschedulable pods. Do not add an implicit barrier at the
@@ -1151,13 +1258,13 @@ func cleanupWorkload(ctx context.Context, tb testing.TB, tc *testCase, client cl
}).WithTimeout(5*time.Minute).Should(gomega.BeEmpty(), "deleting namespaces")
}
-func createNamespaceIfNotPresent(ctx context.Context, b *testing.B, client clientset.Interface, namespace string, podsPerNamespace *map[string]int) {
+func createNamespaceIfNotPresent(ctx context.Context, tb testing.TB, client clientset.Interface, namespace string, podsPerNamespace *map[string]int) {
if _, ok := (*podsPerNamespace)[namespace]; !ok {
// The namespace has not been created yet.
// So, create and register it.
_, err := client.CoreV1().Namespaces().Create(ctx, &v1.Namespace{ObjectMeta: metav1.ObjectMeta{Name: namespace}}, metav1.CreateOptions{})
if err != nil {
- b.Fatalf("failed to create namespace for Pod: %v", namespace)
+ tb.Fatalf("failed to create namespace for Pod: %v", namespace)
}
(*podsPerNamespace)[namespace] = 0
}
@@ -1168,12 +1275,12 @@ type testDataCollector interface {
collect() []DataItem
}
-func getTestDataCollectors(tb testing.TB, podInformer coreinformers.PodInformer, name, namespace string, mcc *metricsCollectorConfig) []testDataCollector {
+func getTestDataCollectors(tb testing.TB, podInformer coreinformers.PodInformer, name, namespace string, mcc *metricsCollectorConfig, throughputErrorMargin float64) []testDataCollector {
if mcc == nil {
mcc = &defaultMetricsCollectorConfig
}
return []testDataCollector{
- newThroughputCollector(tb, podInformer, map[string]string{"Name": name}, []string{namespace}),
+ newThroughputCollector(tb, podInformer, map[string]string{"Name": name}, []string{namespace}, throughputErrorMargin),
newMetricsCollector(mcc, map[string]string{"Name": name}),
}
}
@@ -1206,12 +1313,12 @@ func getNodePreparer(prefix string, cno *createNodesOp, clientset clientset.Inte
), nil
}
-func createPods(ctx context.Context, b *testing.B, namespace string, cpo *createPodsOp, clientset clientset.Interface) error {
+func createPods(ctx context.Context, tb testing.TB, namespace string, cpo *createPodsOp, clientset clientset.Interface) error {
strategy, err := getPodStrategy(cpo)
if err != nil {
return err
}
- b.Logf("creating %d pods in namespace %q", cpo.Count, namespace)
+ tb.Logf("creating %d pods in namespace %q", cpo.Count, namespace)
config := testutils.NewTestPodCreatorConfig()
config.AddStrategy(namespace, cpo.Count, strategy)
podCreator := testutils.NewTestPodCreator(clientset, config)
@@ -1221,7 +1328,7 @@ func createPods(ctx context.Context, b *testing.B, namespace string, cpo *create
// waitUntilPodsScheduledInNamespace blocks until all pods in the given
// namespace are scheduled. Times out after 10 minutes because even at the
// lowest observed QPS of ~10 pods/sec, a 5000-node test should complete.
-func waitUntilPodsScheduledInNamespace(ctx context.Context, b *testing.B, podInformer coreinformers.PodInformer, namespace string, wantCount int) error {
+func waitUntilPodsScheduledInNamespace(ctx context.Context, tb testing.TB, podInformer coreinformers.PodInformer, namespace string, wantCount int) error {
return wait.PollImmediate(1*time.Second, 10*time.Minute, func() (bool, error) {
select {
case <-ctx.Done():
@@ -1233,17 +1340,17 @@ func waitUntilPodsScheduledInNamespace(ctx context.Context, b *testing.B, podInf
return false, err
}
if len(scheduled) >= wantCount {
- b.Logf("scheduling succeed")
+ tb.Logf("scheduling succeed")
return true, nil
}
- b.Logf("namespace: %s, pods: want %d, got %d", namespace, wantCount, len(scheduled))
+ tb.Logf("namespace: %s, pods: want %d, got %d", namespace, wantCount, len(scheduled))
return false, nil
})
}
// waitUntilPodsScheduled blocks until all pods in the given namespaces are
// scheduled.
-func waitUntilPodsScheduled(ctx context.Context, b *testing.B, podInformer coreinformers.PodInformer, namespaces []string, numPodsScheduledPerNamespace map[string]int) error {
+func waitUntilPodsScheduled(ctx context.Context, tb testing.TB, podInformer coreinformers.PodInformer, namespaces []string, numPodsScheduledPerNamespace map[string]int) error {
// If unspecified, default to all known namespaces.
if len(namespaces) == 0 {
for namespace := range numPodsScheduledPerNamespace {
@@ -1260,7 +1367,7 @@ func waitUntilPodsScheduled(ctx context.Context, b *testing.B, podInformer corei
if !ok {
return fmt.Errorf("unknown namespace %s", namespace)
}
- if err := waitUntilPodsScheduledInNamespace(ctx, b, podInformer, namespace, wantCount); err != nil {
+ if err := waitUntilPodsScheduledInNamespace(ctx, tb, podInformer, namespace, wantCount); err != nil {
return fmt.Errorf("error waiting for pods in namespace %q: %w", namespace, err)
}
}
@@ -1414,10 +1521,10 @@ type namespacePreparer struct {
count int
prefix string
spec *v1.Namespace
- t testing.TB
+ tb testing.TB
}
-func newNamespacePreparer(cno *createNamespacesOp, clientset clientset.Interface, b *testing.B) (*namespacePreparer, error) {
+func newNamespacePreparer(cno *createNamespacesOp, clientset clientset.Interface, tb testing.TB) (*namespacePreparer, error) {
ns := &v1.Namespace{}
if cno.NamespaceTemplatePath != nil {
if err := getSpecFromFile(cno.NamespaceTemplatePath, ns); err != nil {
@@ -1430,7 +1537,7 @@ func newNamespacePreparer(cno *createNamespacesOp, clientset clientset.Interface
count: cno.Count,
prefix: cno.Prefix,
spec: ns,
- t: b,
+ tb: tb,
}, nil
}
@@ -1449,7 +1556,7 @@ func (p *namespacePreparer) prepare(ctx context.Context) error {
if p.spec != nil {
base = p.spec
}
- p.t.Logf("Making %d namespaces with prefix %q and template %v", p.count, p.prefix, *base)
+ p.tb.Logf("Making %d namespaces with prefix %q and template %v", p.count, p.prefix, *base)
for i := 0; i < p.count; i++ {
n := base.DeepCopy()
n.Name = fmt.Sprintf("%s-%d", p.prefix, i)
@@ -1469,7 +1576,7 @@ func (p *namespacePreparer) cleanup(ctx context.Context) error {
for i := 0; i < p.count; i++ {
n := fmt.Sprintf("%s-%d", p.prefix, i)
if err := p.client.CoreV1().Namespaces().Delete(ctx, n, metav1.DeleteOptions{}); err != nil {
- p.t.Errorf("Deleting Namespace: %v", err)
+ p.tb.Errorf("Deleting Namespace: %v", err)
errRet = err
}
}
diff --git a/test/integration/scheduler_perf/util.go b/test/integration/scheduler_perf/util.go
index 91d6edc78ad..79bc3ab10bb 100644
--- a/test/integration/scheduler_perf/util.go
+++ b/test/integration/scheduler_perf/util.go
@@ -73,7 +73,7 @@ func newDefaultComponentConfig() (*config.KubeSchedulerConfiguration, error) {
return &cfg, nil
}
-// mustSetupScheduler starts the following components:
+// mustSetupCluster starts the following components:
// - k8s api server
// - scheduler
// - some of the kube-controller-manager controllers
@@ -82,11 +82,11 @@ func newDefaultComponentConfig() (*config.KubeSchedulerConfiguration, error) {
// remove resources when finished.
// Notes on rate limiter:
// - client rate limit is set to 5000.
-func mustSetupCluster(ctx context.Context, b *testing.B, config *config.KubeSchedulerConfiguration, enabledFeatures map[featuregate.Feature]bool) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) {
+func mustSetupCluster(ctx context.Context, tb testing.TB, config *config.KubeSchedulerConfiguration, enabledFeatures map[featuregate.Feature]bool) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) {
// Run API server with minimal logging by default. Can be raised with -v.
framework.MinVerbosity = 0
- _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, b, framework.TestServerSetup{
+ _, kubeConfig, tearDownFn := framework.StartTestServer(ctx, tb, framework.TestServerSetup{
ModifyServerRunOptions: func(opts *options.ServerRunOptions) {
// Disable ServiceAccount admission plugin as we don't have serviceaccount controller running.
opts.Admission.GenericAdmission.DisablePlugins = []string{"ServiceAccount", "TaintNodesByCondition", "Priority"}
@@ -99,12 +99,12 @@ func mustSetupCluster(ctx context.Context, b *testing.B, config *config.KubeSche
}
},
})
- b.Cleanup(tearDownFn)
+ tb.Cleanup(tearDownFn)
// Cleanup will be in reverse order: first the clients get cancelled,
// then the apiserver is torn down.
ctx, cancel := context.WithCancel(ctx)
- b.Cleanup(cancel)
+ tb.Cleanup(cancel)
// TODO: client connection configuration, such as QPS or Burst, is configurable in theory; it could be derived from the `config`. We need to
// support this when there is any testcase that depends on such configuration.
@@ -117,7 +117,7 @@ func mustSetupCluster(ctx context.Context, b *testing.B, config *config.KubeSche
var err error
config, err = newDefaultComponentConfig()
if err != nil {
- b.Fatalf("Error creating default component config: %v", err)
+ tb.Fatalf("Error creating default component config: %v", err)
}
}
@@ -128,14 +128,14 @@ func mustSetupCluster(ctx context.Context, b *testing.B, config *config.KubeSche
// be applied to start a scheduler, most of them are defined in `scheduler.schedulerOptions`.
_, informerFactory := util.StartScheduler(ctx, client, cfg, config)
util.StartFakePVController(ctx, client, informerFactory)
- runGC := util.CreateGCController(ctx, b, *cfg, informerFactory)
- runNS := util.CreateNamespaceController(ctx, b, *cfg, informerFactory)
+ runGC := util.CreateGCController(ctx, tb, *cfg, informerFactory)
+ runNS := util.CreateNamespaceController(ctx, tb, *cfg, informerFactory)
runResourceClaimController := func() {}
if enabledFeatures[features.DynamicResourceAllocation] {
// Testing of DRA with inline resource claims depends on this
// controller for creating and removing ResourceClaims.
- runResourceClaimController = util.CreateResourceClaimController(ctx, b, client, informerFactory)
+ runResourceClaimController = util.CreateResourceClaimController(ctx, tb, client, informerFactory)
}
informerFactory.Start(ctx.Done())
@@ -320,14 +320,16 @@ type throughputCollector struct {
schedulingThroughputs []float64
labels map[string]string
namespaces []string
+ errorMargin float64
}
-func newThroughputCollector(tb testing.TB, podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string) *throughputCollector {
+func newThroughputCollector(tb testing.TB, podInformer coreinformers.PodInformer, labels map[string]string, namespaces []string, errorMargin float64) *throughputCollector {
return &throughputCollector{
tb: tb,
podInformer: podInformer,
labels: labels,
namespaces: namespaces,
+ errorMargin: errorMargin,
}
}
@@ -388,9 +390,7 @@ func (tc *throughputCollector) run(ctx context.Context) {
throughput := float64(newScheduled) / durationInSeconds
expectedDuration := throughputSampleInterval * time.Duration(skipped+1)
errorMargin := (duration - expectedDuration).Seconds() / expectedDuration.Seconds() * 100
- // TODO: To prevent the perf-test failure, we increased the error margin, if still not enough
- // one day, we should think of another approach to avoid this trick.
- if math.Abs(errorMargin) > 30 {
+ if tc.errorMargin > 0 && math.Abs(errorMargin) > tc.errorMargin {
// This might affect the result, report it.
tc.tb.Errorf("ERROR: Expected throuput collector to sample at regular time intervals. The %d most recent intervals took %s instead of %s, a difference of %0.1f%%.", skipped+1, duration, expectedDuration, errorMargin)
}
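To illustrate the check that the new errorMargin parameter controls, here is a
minimal sketch with made-up numbers (a margin of 0 disables the check, as used
for integration tests):

```
package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	const sampleInterval = time.Second      // how often the collector is supposed to sample
	skipped := 0                            // intervals without newly scheduled pods since the last report
	duration := 1400 * time.Millisecond     // actual time covered by the most recent intervals

	expected := sampleInterval * time.Duration(skipped+1)
	deviation := (duration - expected).Seconds() / expected.Seconds() * 100

	allowed := 30.0 // benchmark default; 0 would disable the check
	if allowed > 0 && math.Abs(deviation) > allowed {
		fmt.Printf("sampling interval off by %0.1f%%, more than %0.1f%% allowed\n", deviation, allowed)
	}
}
```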
|
71
|
scheduler_perf: fix namespace deletion
Merely deleting the namespace is not enough:
- Workloads might rely on the garbage collector to get rid of obsolete objects,
so we should run it to be on the safe side.
- Pods must be force-deleted because kubelet is not running.
- Finally, the namespace controller is needed to get rid of
deleted namespaces.
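A sketch of the resulting force-deletion pattern, assuming a ready-to-use
client-go clientset and context (error handling kept minimal):

```
package cleanup

import (
	"context"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	clientset "k8s.io/client-go/kubernetes"
)

// forceDeleteNamespace deletes all pods with grace period 0 because no
// kubelet is running to confirm their termination, then deletes the
// namespace itself. The namespace controller (started separately) finishes
// the removal asynchronously.
func forceDeleteNamespace(ctx context.Context, client clientset.Interface, namespace string) error {
	deleteNow := *metav1.NewDeleteOptions(0)
	if err := client.CoreV1().Pods(namespace).DeleteCollection(ctx, deleteNow, metav1.ListOptions{}); err != nil {
		return err
	}
	return client.CoreV1().Namespaces().Delete(ctx, namespace, deleteNow)
}
```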
|
2023-08-18
| null |
index f1e8435e09b..215c369a082 100644
--- a/test/integration/job/job_test.go
+++ b/test/integration/job/job_test.go
@@ -41,27 +41,22 @@ import (
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apimachinery/pkg/watch"
"k8s.io/apiserver/pkg/util/feature"
- cacheddiscovery "k8s.io/client-go/discovery/cached/memory"
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
typedv1 "k8s.io/client-go/kubernetes/typed/batch/v1"
- "k8s.io/client-go/metadata"
- "k8s.io/client-go/metadata/metadatainformer"
restclient "k8s.io/client-go/rest"
- "k8s.io/client-go/restmapper"
"k8s.io/client-go/util/retry"
featuregatetesting "k8s.io/component-base/featuregate/testing"
basemetrics "k8s.io/component-base/metrics"
"k8s.io/component-base/metrics/testutil"
- "k8s.io/controller-manager/pkg/informerfactory"
"k8s.io/klog/v2"
kubeapiservertesting "k8s.io/kubernetes/cmd/kube-apiserver/app/testing"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
- "k8s.io/kubernetes/pkg/controller/garbagecollector"
jobcontroller "k8s.io/kubernetes/pkg/controller/job"
"k8s.io/kubernetes/pkg/controller/job/metrics"
"k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/test/integration/framework"
+ "k8s.io/kubernetes/test/integration/util"
"k8s.io/utils/pointer"
)
@@ -1313,7 +1308,7 @@ func TestOrphanPodsFinalizersClearedWithGC(t *testing.T) {
defer cancel()
restConfig.QPS = 200
restConfig.Burst = 200
- runGC := createGC(ctx, t, restConfig, informerSet)
+ runGC := util.CreateGCController(ctx, t, *restConfig, informerSet)
informerSet.Start(ctx.Done())
go jc.Run(ctx, 1)
runGC()
@@ -2092,40 +2087,6 @@ func createJobControllerWithSharedInformers(restConfig *restclient.Config, infor
return jc, ctx, cancel
}
-func createGC(ctx context.Context, t *testing.T, restConfig *restclient.Config, informerSet informers.SharedInformerFactory) func() {
- restConfig = restclient.AddUserAgent(restConfig, "gc-controller")
- clientSet := clientset.NewForConfigOrDie(restConfig)
- metadataClient, err := metadata.NewForConfig(restConfig)
- if err != nil {
- t.Fatalf("Failed to create metadataClient: %v", err)
- }
- restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cacheddiscovery.NewMemCacheClient(clientSet.Discovery()))
- restMapper.Reset()
- metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0)
- alwaysStarted := make(chan struct{})
- close(alwaysStarted)
- gc, err := garbagecollector.NewGarbageCollector(
- clientSet,
- metadataClient,
- restMapper,
- garbagecollector.DefaultIgnoredResources(),
- informerfactory.NewInformerFactory(informerSet, metadataInformers),
- alwaysStarted,
- )
- if err != nil {
- t.Fatalf("Failed creating garbage collector")
- }
- startGC := func() {
- syncPeriod := 5 * time.Second
- go wait.Until(func() {
- restMapper.Reset()
- }, syncPeriod, ctx.Done())
- go gc.Run(ctx, 1)
- go gc.Sync(ctx, clientSet.Discovery(), syncPeriod)
- }
- return startGC
-}
-
func hasJobTrackingFinalizer(obj metav1.Object) bool {
for _, fin := range obj.GetFinalizers() {
if fin == batchv1.JobTrackingFinalizer {
diff --git a/test/integration/scheduler_perf/scheduler_perf_test.go b/test/integration/scheduler_perf/scheduler_perf_test.go
index ba051aa8c9c..bd02232eeee 100644
--- a/test/integration/scheduler_perf/scheduler_perf_test.go
+++ b/test/integration/scheduler_perf/scheduler_perf_test.go
@@ -30,6 +30,8 @@ import (
"testing"
"time"
+ "github.com/onsi/gomega"
+
v1 "k8s.io/api/core/v1"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
apierrors "k8s.io/apimachinery/pkg/api/errors"
@@ -47,6 +49,7 @@ import (
"k8s.io/component-base/featuregate"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/component-base/metrics/legacyregistry"
+ "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
@@ -794,11 +797,11 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
b.Fatalf("validate scheduler config file failed: %v", err)
}
}
- informerFactory, client, dynClient := mustSetupScheduler(ctx, b, cfg, tc.FeatureGates)
+ informerFactory, client, dynClient := mustSetupCluster(ctx, b, cfg, tc.FeatureGates)
// Additional informers needed for testing. The pod informer was
// already created before (scheduler.NewInformerFactory) and the
- // factory was started for it (mustSetupScheduler), therefore we don't
+ // factory was started for it (mustSetupCluster), therefore we don't
// need to start again.
podInformer := informerFactory.Core().V1().Pods()
@@ -816,13 +819,8 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
numPodsScheduledPerNamespace := make(map[string]int)
if cleanup {
- b.Cleanup(func() {
- for namespace := range numPodsScheduledPerNamespace {
- if err := client.CoreV1().Namespaces().Delete(ctx, namespace, metav1.DeleteOptions{}); err != nil {
- b.Errorf("Deleting Namespace in numPodsScheduledPerNamespace: %v", err)
- }
- }
- })
+ // This must run before controllers get shut down.
+ defer cleanupWorkload(ctx, b, tc, client, numPodsScheduledPerNamespace)
}
for opIndex, op := range unrollWorkloadTemplate(b, tc.WorkloadTemplate, w) {
@@ -1089,6 +1087,70 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
return dataItems
}
+// cleanupWorkload ensures that everything is removed from the API server that
+// might have been created by runWorkload. This must be done before starting
+// the next workload because otherwise it might stumble over previously created
+// objects. For example, the namespaces are the same in different workloads, so
+// not deleting them would cause the next one to fail with "cannot create
+// namespace: already exists".
+//
+// Calling cleanupWorkload can be skipped if it is known that the next workload
+// will run with a fresh etcd instance.
+func cleanupWorkload(ctx context.Context, tb testing.TB, tc *testCase, client clientset.Interface, numPodsScheduledPerNamespace map[string]int) {
+ deleteNow := *metav1.NewDeleteOptions(0)
+ for namespace := range numPodsScheduledPerNamespace {
+ // Pods have to be deleted explicitly, with no grace period. Normally
+ // kubelet will set the DeletionGracePeriodSeconds to zero when it's okay
+ // to remove a deleted pod, but we don't run kubelet...
+ if err := client.CoreV1().Pods(namespace).DeleteCollection(ctx, deleteNow, metav1.ListOptions{}); err != nil {
+ tb.Fatalf("failed to delete pods in namespace %q: %v", namespace, err)
+ }
+ if err := client.CoreV1().Namespaces().Delete(ctx, namespace, deleteNow); err != nil {
+ tb.Fatalf("Deleting Namespace %q in numPodsScheduledPerNamespace: %v", namespace, err)
+ }
+ }
+
+ // We need to wait here because even with deletion timestamp set,
+ // actually removing a namespace can take some time (garbage collecting
+ // other generated objects like secrets, etc.) and we don't want to
+ // start the next workloads while that cleanup is still going on.
+ gomega.NewGomegaWithT(tb).Eventually(ctx, func(ctx context.Context) ([]interface{}, error) {
+ var objects []interface{}
+ namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return nil, err
+ }
+ // Collecting several objects of interest (pods, claims) is done to
+ // provide a more informative failure message when a namespace doesn't
+ // disappear quickly enough.
+ for _, namespace := range namespaces.Items {
+ if _, ok := numPodsScheduledPerNamespace[namespace.Name]; !ok {
+ // Not a namespace created by the workload.
+ continue
+ }
+ pods, err := client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return nil, err
+ }
+ if len(pods.Items) > 0 {
+ // Record one pod per namespace - that's usually enough information.
+ objects = append(objects, pods.Items[0])
+ }
+ if tc.FeatureGates[features.DynamicResourceAllocation] {
+ claims, err := client.ResourceV1alpha2().ResourceClaims(namespace.Name).List(ctx, metav1.ListOptions{})
+ if err != nil {
+ return nil, err
+ }
+ if len(claims.Items) > 0 {
+ objects = append(objects, claims.Items[0])
+ }
+ }
+ objects = append(objects, namespace)
+ }
+ return objects, nil
+ }).WithTimeout(5*time.Minute).Should(gomega.BeEmpty(), "deleting namespaces")
+}
+
func createNamespaceIfNotPresent(ctx context.Context, b *testing.B, client clientset.Interface, namespace string, podsPerNamespace *map[string]int) {
if _, ok := (*podsPerNamespace)[namespace]; !ok {
// The namespace has not been created yet.
// So, create and register it.
index e9fb5273be8..91d6edc78ad 100644
--- a/test/integration/scheduler_perf/util.go
+++ b/test/integration/scheduler_perf/util.go
@@ -76,11 +76,13 @@ func newDefaultComponentConfig() (*config.KubeSchedulerConfiguration, error) {
// mustSetupScheduler starts the following components:
// - k8s api server
// - scheduler
+// - some of the kube-controller-manager controllers
+//
// It returns regular and dynamic clients, and destroyFunc which should be used to
// remove resources when finished.
// Notes on rate limiter:
// - client rate limit is set to 5000.
-func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSchedulerConfiguration, enabledFeatures map[featuregate.Feature]bool) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) {
+func mustSetupCluster(ctx context.Context, b *testing.B, config *config.KubeSchedulerConfiguration, enabledFeatures map[featuregate.Feature]bool) (informers.SharedInformerFactory, clientset.Interface, dynamic.Interface) {
// Run API server with minimal logging by default. Can be raised with -v.
framework.MinVerbosity = 0
@@ -126,6 +128,8 @@ func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSc
// be applied to start a scheduler, most of them are defined in `scheduler.schedulerOptions`.
_, informerFactory := util.StartScheduler(ctx, client, cfg, config)
util.StartFakePVController(ctx, client, informerFactory)
+ runGC := util.CreateGCController(ctx, b, *cfg, informerFactory)
+ runNS := util.CreateNamespaceController(ctx, b, *cfg, informerFactory)
runResourceClaimController := func() {}
if enabledFeatures[features.DynamicResourceAllocation] {
@@ -136,6 +140,8 @@ func mustSetupScheduler(ctx context.Context, b *testing.B, config *config.KubeSc
informerFactory.Start(ctx.Done())
informerFactory.WaitForCacheSync(ctx.Done())
+ go runGC()
+ go runNS()
go runResourceClaimController()
return informerFactory, client, dynClient
diff --git a/test/integration/util/util.go b/test/integration/util/util.go
index 4a0451d326d..0875350d3c5 100644
--- a/test/integration/util/util.go
+++ b/test/integration/util/util.go
@@ -36,17 +36,22 @@ import (
"k8s.io/client-go/informers"
clientset "k8s.io/client-go/kubernetes"
corelisters "k8s.io/client-go/listers/core/v1"
+ "k8s.io/client-go/metadata"
+ "k8s.io/client-go/metadata/metadatainformer"
restclient "k8s.io/client-go/rest"
"k8s.io/client-go/restmapper"
"k8s.io/client-go/scale"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/tools/events"
pvutil "k8s.io/component-helpers/storage/volume"
+ "k8s.io/controller-manager/pkg/informerfactory"
"k8s.io/klog/v2"
"k8s.io/kube-scheduler/config/v1beta3"
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
podutil "k8s.io/kubernetes/pkg/api/v1/pod"
"k8s.io/kubernetes/pkg/controller/disruption"
+ "k8s.io/kubernetes/pkg/controller/garbagecollector"
+ "k8s.io/kubernetes/pkg/controller/namespace"
"k8s.io/kubernetes/pkg/controller/resourceclaim"
"k8s.io/kubernetes/pkg/controlplane"
"k8s.io/kubernetes/pkg/scheduler"
@@ -162,6 +167,67 @@ func StartFakePVController(ctx context.Context, clientSet clientset.Interface, i
})
}
+// CreateGCController creates a garbage collector controller and returns a run
+// function for it. The informer factory needs to be started before invoking
+// that function.
+func CreateGCController(ctx context.Context, tb testing.TB, restConfig restclient.Config, informerSet informers.SharedInformerFactory) func() {
+ restclient.AddUserAgent(&restConfig, "gc-controller")
+ clientSet := clientset.NewForConfigOrDie(&restConfig)
+ metadataClient, err := metadata.NewForConfig(&restConfig)
+ if err != nil {
+ tb.Fatalf("Failed to create metadataClient: %v", err)
+ }
+ restMapper := restmapper.NewDeferredDiscoveryRESTMapper(cacheddiscovery.NewMemCacheClient(clientSet.Discovery()))
+ restMapper.Reset()
+ metadataInformers := metadatainformer.NewSharedInformerFactory(metadataClient, 0)
+ alwaysStarted := make(chan struct{})
+ close(alwaysStarted)
+ gc, err := garbagecollector.NewGarbageCollector(
+ clientSet,
+ metadataClient,
+ restMapper,
+ garbagecollector.DefaultIgnoredResources(),
+ informerfactory.NewInformerFactory(informerSet, metadataInformers),
+ alwaysStarted,
+ )
+ if err != nil {
+ tb.Fatalf("Failed creating garbage collector")
+ }
+ startGC := func() {
+ syncPeriod := 5 * time.Second
+ go wait.Until(func() {
+ restMapper.Reset()
+ }, syncPeriod, ctx.Done())
+ go gc.Run(ctx, 1)
+ go gc.Sync(ctx, clientSet.Discovery(), syncPeriod)
+ }
+ return startGC
+}
+
+// CreateNamespaceController creates a namespace controller and returns a run
+// function for it. The informer factory needs to be started before invoking
+// that function.
+func CreateNamespaceController(ctx context.Context, tb testing.TB, restConfig restclient.Config, informerSet informers.SharedInformerFactory) func() {
+ restclient.AddUserAgent(&restConfig, "namespace-controller")
+ clientSet := clientset.NewForConfigOrDie(&restConfig)
+ metadataClient, err := metadata.NewForConfig(&restConfig)
+ if err != nil {
+ tb.Fatalf("Failed to create metadataClient: %v", err)
+ }
+ discoverResourcesFn := clientSet.Discovery().ServerPreferredNamespacedResources
+ controller := namespace.NewNamespaceController(
+ ctx,
+ clientSet,
+ metadataClient,
+ discoverResourcesFn,
+ informerSet.Core().V1().Namespaces(),
+ 10*time.Hour,
+ v1.FinalizerKubernetes)
+ return func() {
+ go controller.Run(ctx, 5)
+ }
+}
+
// TestContext stores necessary context info
type TestContext struct {
NS *v1.Namespace
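A hypothetical usage sketch showing the intended call order for the new
helpers (they register informers, so they must be created before the factory
is started; the returned run functions are invoked afterwards):

```
package example

import (
	"context"
	"testing"

	"k8s.io/client-go/informers"
	restclient "k8s.io/client-go/rest"
	"k8s.io/kubernetes/test/integration/util"
)

// startCleanupControllers wires up the garbage collector and namespace
// controllers the same way mustSetupCluster does.
func startCleanupControllers(ctx context.Context, tb testing.TB, restConfig restclient.Config, informerFactory informers.SharedInformerFactory) {
	runGC := util.CreateGCController(ctx, tb, restConfig, informerFactory)
	runNS := util.CreateNamespaceController(ctx, tb, restConfig, informerFactory)
	informerFactory.Start(ctx.Done())
	informerFactory.WaitForCacheSync(ctx.Done())
	go runGC()
	go runNS()
}
```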
|
88
|
update openapi
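The discovery documents added below can be reproduced against a live cluster;
a sketch using client-go, assuming the default kubeconfig is reachable:

```
package main

import (
	"fmt"

	"k8s.io/client-go/discovery"
	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	config, err := clientcmd.BuildConfigFromFlags("", clientcmd.RecommendedHomeFile)
	if err != nil {
		panic(err)
	}
	dc, err := discovery.NewDiscoveryClientForConfig(config)
	if err != nil {
		panic(err)
	}
	// This corresponds to api/discovery/apis__apiregistration.k8s.io__v1.json.
	list, err := dc.ServerResourcesForGroupVersion("apiregistration.k8s.io/v1")
	if err != nil {
		panic(err)
	}
	for _, r := range list.APIResources {
		fmt.Println(r.Name, r.Verbs)
	}
}
```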
|
2023-08-18
| null |
new file mode 100644
index 00000000000..224aaa826ac
--- /dev/null
+++ b/api/discovery/apis__apiregistration.k8s.io.json
@@ -0,0 +1,15 @@
+{
+ "apiVersion": "v1",
+ "kind": "APIGroup",
+ "name": "apiregistration.k8s.io",
+ "preferredVersion": {
+ "groupVersion": "apiregistration.k8s.io/v1",
+ "version": "v1"
+ },
+ "versions": [
+ {
+ "groupVersion": "apiregistration.k8s.io/v1",
+ "version": "v1"
+ }
+ ]
+}
diff --git a/api/discovery/apis__apiregistration.k8s.io__v1.json b/api/discovery/apis__apiregistration.k8s.io__v1.json
new file mode 100644
index 00000000000..470c62af20e
--- /dev/null
+++ b/api/discovery/apis__apiregistration.k8s.io__v1.json
@@ -0,0 +1,38 @@
+{
+ "apiVersion": "v1",
+ "groupVersion": "apiregistration.k8s.io/v1",
+ "kind": "APIResourceList",
+ "resources": [
+ {
+ "categories": [
+ "api-extensions"
+ ],
+ "kind": "APIService",
+ "name": "apiservices",
+ "namespaced": false,
+ "singularName": "apiservice",
+ "storageVersionHash": "InPBPD7+PqM=",
+ "verbs": [
+ "create",
+ "delete",
+ "deletecollection",
+ "get",
+ "list",
+ "patch",
+ "update",
+ "watch"
+ ]
+ },
+ {
+ "kind": "APIService",
+ "name": "apiservices/status",
+ "namespaced": false,
+ "singularName": "",
+ "verbs": [
+ "get",
+ "patch",
+ "update"
+ ]
+ }
+ ]
+}
diff --git a/api/openapi-spec/v3/apis__apiregistration.k8s.io__v1_openapi.json b/api/openapi-spec/v3/apis__apiregistration.k8s.io__v1_openapi.json
new file mode 100644
index 00000000000..2dc1cb38175
--- /dev/null
+++ b/api/openapi-spec/v3/apis__apiregistration.k8s.io__v1_openapi.json
@@ -0,0 +1,2796 @@
+{
+ "components": {
+ "schemas": {
+ "io.k8s.apimachinery.pkg.apis.meta.v1.APIResource": {
+ "description": "APIResource specifies the name of a resource and whether it is namespaced.",
+ "properties": {
+ "categories": {
+ "description": "categories is a list of the grouped resources this resource belongs to (e.g. 'all')",
+ "items": {
+ "default": "",
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "group": {
+ "description": "group is the preferred group of the resource. Empty implies the group of the containing resource list. For subresources, this may have a different value, for example: Scale\\".",
+ "type": "string"
+ },
+ "kind": {
+ "default": "",
+ "description": "kind is the kind for the resource (e.g. 'Foo' is the kind for a resource 'foo')",
+ "type": "string"
+ },
+ "name": {
+ "default": "",
+ "description": "name is the plural name of the resource.",
+ "type": "string"
+ },
+ "namespaced": {
+ "default": false,
+ "description": "namespaced indicates if a resource is namespaced or not.",
+ "type": "boolean"
+ },
+ "shortNames": {
+ "description": "shortNames is a list of suggested short names of the resource.",
+ "items": {
+ "default": "",
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "singularName": {
+ "default": "",
+ "description": "singularName is the singular name of the resource. This allows clients to handle plural and singular opaquely. The singularName is more correct for reporting status on a single item and both singular and plural are allowed from the kubectl CLI interface.",
+ "type": "string"
+ },
+ "storageVersionHash": {
+ "description": "The hash value of the storage version, the version this resource is converted to when written to the data store. Value must be treated as opaque by clients. Only equality comparison on the value is valid. This is an alpha feature and may change or be removed in the future. The field is populated by the apiserver only if the StorageVersionHash feature gate is enabled. This field will remain optional even if it graduates.",
+ "type": "string"
+ },
+ "verbs": {
+ "description": "verbs is a list of supported kube verbs (this includes get, list, watch, create, update, patch, delete, deletecollection, and proxy)",
+ "items": {
+ "default": "",
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "version": {
+ "description": "version is the preferred version of the resource. Empty implies the version of the containing resource list For subresources, this may have a different value, for example: v1 (while inside a v1beta1 version of the core resource's group)\\".",
+ "type": "string"
+ }
+ },
+ "required": [
+ "name",
+ "singularName",
+ "namespaced",
+ "kind",
+ "verbs"
+ ],
+ "type": "object"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList": {
+ "description": "APIResourceList is a list of APIResource, it is used to expose the name of the resources supported in a specific group and version, and if the resource is namespaced.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "groupVersion": {
+ "default": "",
+ "description": "groupVersion is the group and version this APIResourceList is for.",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "resources": {
+ "description": "resources contains the name of the resources and if they are namespaced.",
+ "items": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResource"
+ }
+ ],
+ "default": {}
+ },
+ "type": "array"
+ }
+ },
+ "required": [
+ "groupVersion",
+ "resources"
+ ],
+ "type": "object",
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "",
+ "kind": "APIResourceList",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions": {
+ "description": "DeleteOptions may be provided when deleting an API object.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "dryRun": {
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "items": {
+ "default": "",
+ "type": "string"
+ },
+ "type": "array"
+ },
+ "gracePeriodSeconds": {
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "format": "int64",
+ "type": "integer"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "orphanDependents": {
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \\"orphan\\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "type": "boolean"
+ },
+ "preconditions": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions"
+ }
+ ],
+ "description": "Must be fulfilled before a deletion is carried out. If not possible, a 409 Conflict status will be returned."
+ },
+ "propagationPolicy": {
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "type": "string"
+ }
+ },
+ "type": "object",
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "admission.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "admission.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "admissionregistration.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "admissionregistration.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "admissionregistration.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "apiextensions.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "apiextensions.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "apiregistration.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "apiregistration.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "apps",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "apps",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "apps",
+ "kind": "DeleteOptions",
+ "version": "v1beta2"
+ },
+ {
+ "group": "authentication.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "authentication.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "authentication.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "authorization.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "authorization.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "autoscaling",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "autoscaling",
+ "kind": "DeleteOptions",
+ "version": "v2"
+ },
+ {
+ "group": "autoscaling",
+ "kind": "DeleteOptions",
+ "version": "v2beta1"
+ },
+ {
+ "group": "autoscaling",
+ "kind": "DeleteOptions",
+ "version": "v2beta2"
+ },
+ {
+ "group": "batch",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "batch",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "certificates.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "certificates.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "certificates.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "coordination.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "coordination.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "discovery.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "discovery.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "events.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "events.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "extensions",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "flowcontrol.apiserver.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "flowcontrol.apiserver.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "flowcontrol.apiserver.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta2"
+ },
+ {
+ "group": "flowcontrol.apiserver.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta3"
+ },
+ {
+ "group": "imagepolicy.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "internal.apiserver.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "networking.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "networking.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "networking.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "node.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "node.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "node.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "policy",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "policy",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "rbac.authorization.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "rbac.authorization.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "rbac.authorization.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "resource.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha2"
+ },
+ {
+ "group": "scheduling.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "scheduling.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "scheduling.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ },
+ {
+ "group": "storage.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1"
+ },
+ {
+ "group": "storage.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "storage.k8s.io",
+ "kind": "DeleteOptions",
+ "version": "v1beta1"
+ }
+ ]
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1": {
+ "description": "FieldsV1 stores a set of fields in a data structure like a Trie, in JSON format.\\n\\nEach key is either a '.' representing the field itself, and will always map to an empty set, or a string representing a sub-field or item. The string will follow one of these four formats: 'f:<name>', where <name> is the name of a field in a struct, or key in a map 'v:<value>', where <value> is the exact json formatted value of a list item 'i:<index>', where <index> is position of a item in a list 'k:<keys>', where <keys> is a map of a list item's key fields to their unique values If a key maps to an empty Fields value, the field that key represents is part of the set.\\n\\nThe exact format is defined in sigs.k8s.io/structured-merge-diff",
+ "type": "object"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta": {
+ "description": "ListMeta describes metadata that synthetic resources must have, including lists and various status objects. A resource may have only one of {ObjectMeta, ListMeta}.",
+ "properties": {
+ "continue": {
+ "description": "continue may be set if the user set a limit on the number of items returned, and indicates that the server has more data available. The value is opaque and may be used to issue another request to the endpoint that served this list to retrieve the next set of available objects. Continuing a consistent list may not be possible if the server configuration has changed or more than a few minutes have passed. The resourceVersion field returned when using this continue value will be identical to the value in the first response, unless you have received this token from an error message.",
+ "type": "string"
+ },
+ "remainingItemCount": {
+ "description": "remainingItemCount is the number of subsequent items in the list which are not included in this list response. If the list request contained label or field selectors, then the number of remaining items is unknown and the field will be left unset and omitted during serialization. If the list is complete (either because it is not chunking or because this is the last chunk), then there are no more remaining items and this field will be left unset and omitted during serialization. Servers older than v1.15 do not set this field. The intended use of the remainingItemCount is *estimating* the size of a collection. Clients should not rely on the remainingItemCount to be set or to be exact.",
+ "format": "int64",
+ "type": "integer"
+ },
+ "resourceVersion": {
+ "description": "String that identifies the server's internal version of this object that can be used by clients to determine when objects have changed. Value must be treated as opaque by clients and passed unmodified back to the server. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
+ "type": "string"
+ },
+ "selfLink": {
+ "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry": {
+ "description": "ManagedFieldsEntry is a workflow-id, a FieldSet and the group version of the resource that the fieldset applies to.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the version of this resource that this field set applies to. The format is \\"group/version\\" just like the top-level APIVersion field. It is necessary to track the version of a field set because it cannot be automatically converted.",
+ "type": "string"
+ },
+ "fieldsType": {
+ "description": "FieldsType is the discriminator for the different fields format and version. There is currently only one possible value: \\"FieldsV1\\"",
+ "type": "string"
+ },
+ "fieldsV1": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.FieldsV1"
+ }
+ ],
+ "description": "FieldsV1 holds the first JSON version format as described in the \\"FieldsV1\\" type."
+ },
+ "manager": {
+ "description": "Manager is an identifier of the workflow managing these fields.",
+ "type": "string"
+ },
+ "operation": {
+ "description": "Operation is the type of operation which led to this ManagedFieldsEntry being created. The only valid values for this field are 'Apply' and 'Update'.",
+ "type": "string"
+ },
+ "subresource": {
+ "description": "Subresource is the name of the subresource used to update that object, or empty string if the object was updated through the main resource. The value of this field is used to distinguish between managers, even if they share the same name. For example, a status update will be distinct from a regular update using the same manager name. Note that the APIVersion field is not related to the Subresource field and it always corresponds to the version of the main resource.",
+ "type": "string"
+ },
+ "time": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ ],
+ "description": "Time is the timestamp of when the ManagedFields entry was added. The timestamp will also be updated if a field is added, the manager changes any of the owned fields' values, or removes a field. The timestamp does not update when a field is removed from the entry because another manager took it over."
+ }
+ },
+ "type": "object"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta": {
+ "description": "ObjectMeta is metadata that all persisted resources must have, which includes all objects users must create.",
+ "properties": {
+ "annotations": {
+ "additionalProperties": {
+ "default": "",
+ "type": "string"
+ },
+ "description": "Annotations is an unstructured key value map stored with a resource that may be set by external tools to store and retrieve arbitrary metadata. They are not queryable and should be preserved when modifying objects. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations",
+ "type": "object"
+ },
+ "creationTimestamp": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ ],
+ "default": {},
+ "description": "CreationTimestamp is a timestamp representing the server time when this object was created. It is not guaranteed to be set in happens-before order across separate operations. Clients may not set this value. It is represented in RFC3339 form and is in UTC.\\n\\nPopulated by the system. Read-only. Null for lists. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata"
+ },
+ "deletionGracePeriodSeconds": {
+ "description": "Number of seconds allowed for this object to gracefully terminate before it will be removed from the system. Only set when deletionTimestamp is also set. May only be shortened. Read-only.",
+ "format": "int64",
+ "type": "integer"
+ },
+ "deletionTimestamp": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ ],
+ "description": "DeletionTimestamp is RFC 3339 date and time at which this resource will be deleted. This field is set by the server when a graceful deletion is requested by the user, and is not directly settable by a client. The resource is expected to be deleted (no longer visible from resource lists, and not reachable by name) after the time in this field, once the finalizers list is empty. As long as the finalizers list contains items, deletion is blocked. Once the deletionTimestamp is set, this value may not be unset or be set further into the future, although it may be shortened or the resource may be deleted prior to this time. For example, a user may request that a pod is deleted in 30 seconds. The Kubelet will react by sending a graceful termination signal to the containers in the pod. After that 30 seconds, the Kubelet will send a hard termination signal (SIGKILL) to the container and after cleanup, remove the pod from the API. In the presence of network partitions, this object may still exist after this timestamp, until an administrator or automated process can determine the resource is fully terminated. If not set, graceful deletion of the object has not been requested.\\n\\nPopulated by the system when a graceful deletion is requested. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata"
+ },
+ "finalizers": {
+ "description": "Must be empty before the object is deleted from the registry. Each entry is an identifier for the responsible component that will remove the entry from the list. If the deletionTimestamp of the object is non-nil, entries in this list can only be removed. Finalizers may be processed and removed in any order. Order is NOT enforced because it introduces significant risk of stuck finalizers. finalizers is a shared field; any actor with permission can reorder it. If the finalizer list is processed in order, then this can lead to a situation in which the component responsible for the first finalizer in the list is waiting for a signal (field value, external system, or other) produced by a component responsible for a finalizer later in the list, resulting in a deadlock. Without enforced ordering, finalizers are free to order amongst themselves and are not vulnerable to ordering changes in the list.",
+ "items": {
+ "default": "",
+ "type": "string"
+ },
+ "type": "array",
+ "x-kubernetes-patch-strategy": "merge"
+ },
+ "generateName": {
+ "description": "GenerateName is an optional prefix, used by the server, to generate a unique name ONLY IF the Name field has not been provided. If this field is used, the name returned to the client will be different than the name passed. This value will also be combined with a unique suffix. The provided value has the same validation rules as the Name field, and may be truncated by the length of the suffix required to make the value unique on the server.\\n\\nIf this field is specified and the generated name exists, the server will return a 409.\\n\\nApplied only if Name is not specified. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#idempotency",
+ "type": "string"
+ },
+ "generation": {
+ "description": "A sequence number representing a specific generation of the desired state. Populated by the system. Read-only.",
+ "format": "int64",
+ "type": "integer"
+ },
+ "labels": {
+ "additionalProperties": {
+ "default": "",
+ "type": "string"
+ },
+ "description": "Map of string keys and values that can be used to organize and categorize (scope and select) objects. May match selectors of replication controllers and services. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels",
+ "type": "object"
+ },
+ "managedFields": {
+ "description": "ManagedFields maps workflow-id and version to the set of fields that are managed by that workflow. This is mostly for internal housekeeping, and users typically shouldn't need to set or understand this field. A workflow can be the user's name, a controller's name, or the name of a specific apply path like \\"ci-cd\\". The set of fields is always in the version that the workflow used when modifying the object.",
+ "items": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ManagedFieldsEntry"
+ }
+ ],
+ "default": {}
+ },
+ "type": "array"
+ },
+ "name": {
+ "description": "Name must be unique within a namespace. Is required when creating resources, although some resources may allow a client to request the generation of an appropriate name automatically. Name is primarily intended for creation idempotence and configuration definition. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names",
+ "type": "string"
+ },
+ "namespace": {
+ "description": "Namespace defines the space within which each name must be unique. An empty namespace is equivalent to the \\"default\\" namespace, but \\"default\\" is the canonical representation. Not all objects are required to be scoped to a namespace - the value of this field for those objects will be empty.\\n\\nMust be a DNS_LABEL. Cannot be updated. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces",
+ "type": "string"
+ },
+ "ownerReferences": {
+ "description": "List of objects depended on by this object. If ALL objects in the list have been deleted, this object will be garbage collected. If this object is managed by a controller, then an entry in this list will point to this controller, with the controller field set to true. There cannot be more than one managing controller.",
+ "items": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference"
+ }
+ ],
+ "default": {}
+ },
+ "type": "array",
+ "x-kubernetes-patch-merge-key": "uid",
+ "x-kubernetes-patch-strategy": "merge"
+ },
+ "resourceVersion": {
+ "description": "An opaque value that represents the internal version of this object that can be used by clients to determine when objects have changed. May be used for optimistic concurrency, change detection, and the watch operation on a resource or set of resources. Clients must treat these values as opaque and pass them unmodified back to the server. They may only be valid for a particular resource or set of resources.\\n\\nPopulated by the system. Read-only. Value must be treated as opaque by clients. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#concurrency-control-and-consistency",
+ "type": "string"
+ },
+ "selfLink": {
+ "description": "Deprecated: selfLink is a legacy read-only field that is no longer populated by the system.",
+ "type": "string"
+ },
+ "uid": {
+ "description": "UID is the unique in time and space value for this object. It is typically generated by the server on successful creation of a resource and is not allowed to change on PUT operations.\\n\\nPopulated by the system. Read-only. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.OwnerReference": {
+ "description": "OwnerReference contains enough information to let you identify an owning object. An owning object must be in the same namespace as the dependent, or be cluster-scoped, so there is no namespace field.",
+ "properties": {
+ "apiVersion": {
+ "default": "",
+ "description": "API version of the referent.",
+ "type": "string"
+ },
+ "blockOwnerDeletion": {
+ "description": "If true, AND if the owner has the \\"foregroundDeletion\\" finalizer, then the owner cannot be deleted from the key-value store until this reference is removed. See https://kubernetes.io/docs/concepts/architecture/garbage-collection/#foreground-deletion for how the garbage collector interacts with this field and enforces the foreground deletion. Defaults to false. To set this field, a user needs \\"delete\\" permission on the owner; otherwise 422 (Unprocessable Entity) will be returned.",
+ "type": "boolean"
+ },
+ "controller": {
+ "description": "If true, this reference points to the managing controller.",
+ "type": "boolean"
+ },
+ "kind": {
+ "default": "",
+ "description": "Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "name": {
+ "default": "",
+ "description": "Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#names",
+ "type": "string"
+ },
+ "uid": {
+ "default": "",
+ "description": "UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids",
+ "type": "string"
+ }
+ },
+ "required": [
+ "apiVersion",
+ "kind",
+ "name",
+ "uid"
+ ],
+ "type": "object",
+ "x-kubernetes-map-type": "atomic"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.Patch": {
+ "description": "Patch is provided to give a concrete name and type to the Kubernetes PATCH request body.",
+ "type": "object"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.Preconditions": {
+ "description": "Preconditions must be fulfilled before an operation (update, delete, etc.) is carried out.",
+ "properties": {
+ "resourceVersion": {
+ "description": "Specifies the target ResourceVersion.",
+ "type": "string"
+ },
+ "uid": {
+ "description": "Specifies the target UID.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.Status": {
+ "description": "Status is a return value for calls that don't return other objects.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "code": {
+ "description": "Suggested HTTP return code for this status, 0 if not set.",
+ "format": "int32",
+ "type": "integer"
+ },
+ "details": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails"
+ }
+ ],
+ "description": "Extended data associated with the reason. Each reason may define its own extended details. This field is optional and the data returned is not guaranteed to conform to any schema except that defined by the reason type."
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "message": {
+ "description": "A human-readable description of the status of this operation.",
+ "type": "string"
+ },
+ "metadata": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ ],
+ "default": {},
+ "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds"
+ },
+ "reason": {
+ "description": "A machine-readable description of why this operation is in the \\"Failure\\" status. If this value is empty there is no information available. A Reason clarifies an HTTP status code but does not override it.",
+ "type": "string"
+ },
+ "status": {
+ "description": "Status of the operation. One of: \\"Success\\" or \\"Failure\\". More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "type": "string"
+ }
+ },
+ "type": "object",
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "",
+ "kind": "Status",
+ "version": "v1"
+ },
+ {
+ "group": "resource.k8s.io",
+ "kind": "Status",
+ "version": "v1alpha2"
+ }
+ ]
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause": {
+ "description": "StatusCause provides more information about an api.Status failure, including cases when multiple errors are encountered.",
+ "properties": {
+ "field": {
+ "description": "The field of the resource that has caused this error, as named by its JSON serialization. May include dot and postfix notation for nested attributes. Arrays are zero-indexed. Fields may appear more than once in an array of causes due to fields having multiple errors. Optional.\\n\\nExamples:\\n \\"name\\" - the field \\"name\\" on the current resource\\n \\"items[0].name\\" - the field \\"name\\" on the first array entry in \\"items\\"",
+ "type": "string"
+ },
+ "message": {
+ "description": "A human-readable description of the cause of the error. This field may be presented as-is to a reader.",
+ "type": "string"
+ },
+ "reason": {
+ "description": "A machine-readable description of the cause of the error. If this value is empty there is no information available.",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.StatusDetails": {
+ "description": "StatusDetails is a set of additional properties that MAY be set by the server to provide additional information about a response. The Reason field of a Status object defines what attributes will be set. Clients must ignore fields that do not match the defined type of each attribute, and should assume that any attribute may be empty, invalid, or under-defined.",
+ "properties": {
+ "causes": {
+ "description": "The Causes array includes more details associated with the StatusReason failure. Not all StatusReasons may provide detailed causes.",
+ "items": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.StatusCause"
+ }
+ ],
+ "default": {}
+ },
+ "type": "array"
+ },
+ "group": {
+ "description": "The group attribute of the resource associated with the status StatusReason.",
+ "type": "string"
+ },
+ "kind": {
+ "description": "The kind attribute of the resource associated with the status StatusReason. On some operations may differ from the requested resource Kind. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "name": {
+ "description": "The name attribute of the resource associated with the status StatusReason (when there is a single name which can be described).",
+ "type": "string"
+ },
+ "retryAfterSeconds": {
+ "description": "If specified, the time in seconds before the operation should be retried. Some errors may indicate the client must take an alternate action - for those errors this field may indicate how long to wait before taking the alternate action.",
+ "format": "int32",
+ "type": "integer"
+ },
+ "uid": {
+ "description": "UID of the resource (when there is a single resource which can be described). More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names#uids",
+ "type": "string"
+ }
+ },
+ "type": "object"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.Time": {
+ "description": "Time is a wrapper around time.Time which supports correct marshaling to YAML and JSON. Wrappers are provided for many of the factory methods that the time package offers.",
+ "format": "date-time",
+ "type": "string"
+ },
+ "io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent": {
+ "description": "Event represents a single event to a watched resource.",
+ "properties": {
+ "object": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.runtime.RawExtension"
+ }
+ ],
+ "default": {},
+ "description": "Object is:\\n * If Type is Added or Modified: the new state of the object.\\n * If Type is Deleted: the state of the object immediately before deletion.\\n * If Type is Error: *Status is recommended; other types may make sense\\n depending on context."
+ },
+ "type": {
+ "default": "",
+ "type": "string"
+ }
+ },
+ "required": [
+ "type",
+ "object"
+ ],
+ "type": "object",
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "admission.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "admission.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "admissionregistration.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "admissionregistration.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "admissionregistration.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "apiextensions.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "apiextensions.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "apiregistration.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "apiregistration.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "apps",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "apps",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "apps",
+ "kind": "WatchEvent",
+ "version": "v1beta2"
+ },
+ {
+ "group": "authentication.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "authentication.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "authentication.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "authorization.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "authorization.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "autoscaling",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "autoscaling",
+ "kind": "WatchEvent",
+ "version": "v2"
+ },
+ {
+ "group": "autoscaling",
+ "kind": "WatchEvent",
+ "version": "v2beta1"
+ },
+ {
+ "group": "autoscaling",
+ "kind": "WatchEvent",
+ "version": "v2beta2"
+ },
+ {
+ "group": "batch",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "batch",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "certificates.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "certificates.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "certificates.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "coordination.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "coordination.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "discovery.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "discovery.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "events.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "events.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "extensions",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "flowcontrol.apiserver.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "flowcontrol.apiserver.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "flowcontrol.apiserver.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta2"
+ },
+ {
+ "group": "flowcontrol.apiserver.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta3"
+ },
+ {
+ "group": "imagepolicy.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "internal.apiserver.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "networking.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "networking.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "networking.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "node.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "node.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "node.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "policy",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "policy",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "rbac.authorization.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "rbac.authorization.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "rbac.authorization.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "resource.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha2"
+ },
+ {
+ "group": "scheduling.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "scheduling.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "scheduling.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ },
+ {
+ "group": "storage.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1"
+ },
+ {
+ "group": "storage.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1alpha1"
+ },
+ {
+ "group": "storage.k8s.io",
+ "kind": "WatchEvent",
+ "version": "v1beta1"
+ }
+ ]
+ },
+ "io.k8s.apimachinery.pkg.runtime.RawExtension": {
+ "description": "RawExtension is used to hold extensions in external versions.\\n\\nTo use this, make a field which has RawExtension as its type in your external, versioned struct, and Object in your internal struct. You also need to register your various plugin types.\\n\\n// Internal package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\",inline\\"`\\n\\t\\tMyPlugin runtime.Object `json:\\"myPlugin\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\"aOption\\"`\\n\\t}\\n\\n// External package:\\n\\n\\ttype MyAPIObject struct {\\n\\t\\truntime.TypeMeta `json:\\",inline\\"`\\n\\t\\tMyPlugin runtime.RawExtension `json:\\"myPlugin\\"`\\n\\t}\\n\\n\\ttype PluginA struct {\\n\\t\\tAOption string `json:\\"aOption\\"`\\n\\t}\\n\\n// On the wire, the JSON will look something like this:\\n\\n\\t{\\n\\t\\t\\"kind\\":\\"MyAPIObject\\",\\n\\t\\t\\"apiVersion\\":\\"v1\\",\\n\\t\\t\\"myPlugin\\": {\\n\\t\\t\\t\\"kind\\":\\"PluginA\\",\\n\\t\\t\\t\\"aOption\\":\\"foo\\",\\n\\t\\t},\\n\\t}\\n\\nSo what happens? Decode first uses json or yaml to unmarshal the serialized data into your external MyAPIObject. That causes the raw JSON to be stored, but not unpacked. The next step is to copy (using pkg/conversion) into the internal struct. The runtime package's DefaultScheme has conversion functions installed which will unpack the JSON stored in RawExtension, turning it into the correct object type, and storing it in the Object. (TODO: In the case where the object is of an unknown type, a runtime.Unknown object will be created and stored.)",
+ "type": "object"
+ },
+ "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService": {
+ "description": "APIService represents a server for a particular GroupVersion. Name must be \\"version.group\\".",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ }
+ ],
+ "default": {},
+ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata"
+ },
+ "spec": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceSpec"
+ }
+ ],
+ "default": {},
+ "description": "Spec contains information for locating and communicating with a server"
+ },
+ "status": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceStatus"
+ }
+ ],
+ "default": {},
+ "description": "Status contains derived information about an API server"
+ }
+ },
+ "type": "object",
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "apiregistration.k8s.io",
+ "kind": "APIService",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceCondition": {
+ "description": "APIServiceCondition describes the state of an APIService at a particular point",
+ "properties": {
+ "lastTransitionTime": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ ],
+ "default": {},
+ "description": "Last time the condition transitioned from one status to another."
+ },
+ "message": {
+ "description": "Human-readable message indicating details about last transition.",
+ "type": "string"
+ },
+ "reason": {
+ "description": "Unique, one-word, CamelCase reason for the condition's last transition.",
+ "type": "string"
+ },
+ "status": {
+ "default": "",
+ "description": "Status is the status of the condition. Can be True, False, Unknown.",
+ "type": "string"
+ },
+ "type": {
+ "default": "",
+ "description": "Type is the type of the condition.",
+ "type": "string"
+ }
+ },
+ "required": [
+ "type",
+ "status"
+ ],
+ "type": "object"
+ },
+ "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceList": {
+ "description": "APIServiceList is a list of APIService objects.",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "description": "Items is the list of APIService",
+ "items": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ ],
+ "default": {}
+ },
+ "type": "array"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ ],
+ "default": {},
+ "description": "Standard list metadata More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata"
+ }
+ },
+ "required": [
+ "items"
+ ],
+ "type": "object",
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "apiregistration.k8s.io",
+ "kind": "APIServiceList",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceSpec": {
+ "description": "APIServiceSpec contains information for locating and communicating with a server. Only https is supported, though you are able to disable certificate verification.",
+ "properties": {
+ "caBundle": {
+ "description": "CABundle is a PEM encoded CA bundle which will be used to validate an API server's serving certificate. If unspecified, system trust roots on the apiserver are used.",
+ "format": "byte",
+ "type": "string",
+ "x-kubernetes-list-type": "atomic"
+ },
+ "group": {
+ "description": "Group is the API group name this server hosts",
+ "type": "string"
+ },
+ "groupPriorityMinimum": {
+ "default": 0,
+ "description": "GroupPriorityMinimum is the priority this group should have at least. Higher priority means that the group is preferred by clients over lower priority ones. Note that other versions of this group might specify even higher GroupPriorityMinimum values such that the whole group gets a higher priority. The primary sort is based on GroupPriorityMinimum, ordered highest number to lowest (20 before 10). The secondary sort is based on the alphabetical comparison of the name of the object (v1.bar before v1.foo). We recommend something like: *.k8s.io (except extensions) at 18000, and PaaSes (OpenShift, Deis) in the 2000s.",
+ "format": "int32",
+ "type": "integer"
+ },
+ "insecureSkipTLSVerify": {
+ "description": "InsecureSkipTLSVerify disables TLS certificate verification when communicating with this server. This is strongly discouraged. You should use the CABundle instead.",
+ "type": "boolean"
+ },
+ "service": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.ServiceReference"
+ }
+ ],
+ "description": "Service is a reference to the service for this API server. It must communicate on port 443. If the Service is nil, that means the API group/version is handled locally on this server. The call will simply delegate to the normal handler chain to be fulfilled."
+ },
+ "version": {
+ "description": "Version is the API version this server hosts. For example, \\"v1\\"",
+ "type": "string"
+ },
+ "versionPriority": {
+ "default": 0,
+ "description": "VersionPriority controls the ordering of this API version inside of its group. Must be greater than zero. The primary sort is based on VersionPriority, ordered highest to lowest (20 before 10). Since it's inside of a group, the number can be small, probably in the 10s. In case of equal version priorities, the version string will be used to compute the order inside a group. If the version string is \\"kube-like\\", it will sort above non \\"kube-like\\" version strings, which are ordered lexicographically. \\"Kube-like\\" versions start with a \\"v\\", then are followed by a number (the major version), then optionally the string \\"alpha\\" or \\"beta\\" and another number (the minor version). These are sorted first by GA > beta > alpha (where GA is a version with no suffix such as beta or alpha), and then by comparing major version, then minor version. An example sorted list of versions: v10, v2, v1, v11beta2, v10beta3, v3beta1, v12alpha1, v11alpha2, foo1, foo10.",
+ "format": "int32",
+ "type": "integer"
+ }
+ },
+ "required": [
+ "groupPriorityMinimum",
+ "versionPriority"
+ ],
+ "type": "object"
+ },
+ "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceStatus": {
+ "description": "APIServiceStatus contains derived information about an API server",
+ "properties": {
+ "conditions": {
+ "description": "Current service state of apiService.",
+ "items": {
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceCondition"
+ }
+ ],
+ "default": {}
+ },
+ "type": "array",
+ "x-kubernetes-list-map-keys": [
+ "type"
+ ],
+ "x-kubernetes-list-type": "map",
+ "x-kubernetes-patch-merge-key": "type",
+ "x-kubernetes-patch-strategy": "merge"
+ }
+ },
+ "type": "object"
+ },
+ "io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.ServiceReference": {
+ "description": "ServiceReference holds a reference to Service.legacy.k8s.io",
+ "properties": {
+ "name": {
+ "description": "Name is the name of the service",
+ "type": "string"
+ },
+ "namespace": {
+ "description": "Namespace is the namespace of the service",
+ "type": "string"
+ },
+ "port": {
+ "description": "If specified, the port on the service that is hosting the webhook. Defaults to 443 for backward compatibility. `port` should be a valid port number (1-65535, inclusive).",
+ "format": "int32",
+ "type": "integer"
+ }
+ },
+ "type": "object"
+ }
+ },
+ "securitySchemes": {
+ "BearerToken": {
+ "description": "Bearer Token authentication",
+ "in": "header",
+ "name": "authorization",
+ "type": "apiKey"
+ }
+ }
+ },
+ "info": {
+ "title": "Kubernetes",
+ "version": "unversioned"
+ },
+ "openapi": "3.0.0",
+ "paths": {
+ "/apis/apiregistration.k8s.io/v1/": {
+ "get": {
+ "description": "get available resources",
+ "operationId": "getApiregistrationV1APIResources",
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ }
+ },
+ "description": "OK"
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "tags": [
+ "apiregistration_v1"
+ ]
+ }
+ },
+ "/apis/apiregistration.k8s.io/v1/apiservices": {
+ "delete": {
+ "description": "delete collection of APIService",
+ "operationId": "deleteApiregistrationV1CollectionAPIService",
+ "parameters": [
+ {
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, and the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "in": "query",
+ "name": "continue",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "in": "query",
+ "name": "dryRun",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "in": "query",
+ "name": "fieldSelector",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.",
+ "in": "query",
+ "name": "gracePeriodSeconds",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "in": "query",
+ "name": "labelSelector",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.",
+ "in": "query",
+ "name": "limit",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "Deprecated: please use PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned? If true/false, the \\"orphan\\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "in": "query",
+ "name": "orphanDependents",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "in": "query",
+ "name": "propagationPolicy",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "in": "query",
+ "name": "resourceVersion",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "in": "query",
+ "name": "resourceVersionMatch",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is sent when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is sent when the state is synced at least to the moment\\n when the request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "in": "query",
+ "name": "sendInitialEvents",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "in": "query",
+ "name": "timeoutSeconds",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "description": "OK"
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "tags": [
+ "apiregistration_v1"
+ ],
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "apiregistration.k8s.io",
+ "kind": "APIService",
+ "version": "v1"
+ }
+ },
+ "get": {
+ "description": "list or watch objects of kind APIService",
+ "operationId": "listApiregistrationV1APIService",
+ "parameters": [
+ {
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "in": "query",
+ "name": "allowWatchBookmarks",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue), and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid, whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, and the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "in": "query",
+ "name": "continue",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "in": "query",
+ "name": "fieldSelector",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "in": "query",
+ "name": "labelSelector",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.",
+ "in": "query",
+ "name": "limit",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "in": "query",
+ "name": "resourceVersion",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "in": "query",
+ "name": "resourceVersionMatch",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is sent when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is sent when the state is synced at least to the moment\\n when the request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "in": "query",
+ "name": "sendInitialEvents",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "in": "query",
+ "name": "timeoutSeconds",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "in": "query",
+ "name": "watch",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceList"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceList"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIServiceList"
+ }
+ }
+ },
+ "description": "OK"
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "tags": [
+ "apiregistration_v1"
+ ],
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "apiregistration.k8s.io",
+ "kind": "APIService",
+ "version": "v1"
+ }
+ },
+ "parameters": [
+ {
+ "description": "If 'true', then the output is pretty printed.",
+ "in": "query",
+ "name": "pretty",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "post": {
+ "description": "create an APIService",
+ "operationId": "createApiregistrationV1APIService",
+ "parameters": [
+ {
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "in": "query",
+ "name": "dryRun",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or equal to 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "in": "query",
+ "name": "fieldManager",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "in": "query",
+ "name": "fieldValidation",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ }
+ },
+ "description": "OK"
+ },
+ "201": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ }
+ },
+ "description": "Created"
+ },
+ "202": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ }
+ },
+ "description": "Accepted"
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "tags": [
+ "apiregistration_v1"
+ ],
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "apiregistration.k8s.io",
+ "kind": "APIService",
+ "version": "v1"
+ }
+ }
+ },
+ "/apis/apiregistration.k8s.io/v1/apiservices/{name}": {
+ "delete": {
+ "description": "delete an APIService",
+ "operationId": "deleteApiregistrationV1APIService",
+ "parameters": [
+ {
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "in": "query",
+ "name": "dryRun",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "in": "query",
+ "name": "gracePeriodSeconds",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+            "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "in": "query",
+ "name": "orphanDependents",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "in": "query",
+ "name": "propagationPolicy",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "description": "OK"
+ },
+ "202": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ },
+ "description": "Accepted"
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "tags": [
+ "apiregistration_v1"
+ ],
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "apiregistration.k8s.io",
+ "kind": "APIService",
+ "version": "v1"
+ }
+ },
+ "get": {
+ "description": "read the specified APIService",
+ "operationId": "readApiregistrationV1APIService",
+ "responses": {
+ "200": {
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"
+ }
+ }
+ },
+ "description": "OK"
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "tags": [
+ "apiregistration_v1"
+ ],
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "apiregistration.k8s.io",
+ "kind": "APIService",
+ "version": "v1"
+ }
+ },
+ "parameters": [
+ {
+ "description": "name of the APIService",
+ "in": "path",
+ "name": "name",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "description": "If 'true', then the output is pretty printed.",
+ "in": "query",
+ "name": "pretty",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "patch": {
+ "description": "partially update the specified APIService",
+ "operationId": "patchApiregistrationV1APIService",
+ "parameters": [
+ {
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "in": "query",
+ "name": "dryRun",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },

kubectl explain should work for both cluster and namespace resources and without a GET method

2023-08-18

index f67a9f4ca36..4c5e1c62be5 100644
--- a/staging/src/k8s.io/kubectl/pkg/explain/v2/funcs.go
+++ b/staging/src/k8s.io/kubectl/pkg/explain/v2/funcs.go
@@ -185,6 +185,9 @@ func WithBuiltinTemplateFuncs(tmpl *template.Template) *template.Template {
return copyDict, nil
},
+ "list": func(values ...any) ([]any, error) {
+ return values, nil
+ },
"add": func(value, operand int) int {
return value + operand
},
diff --git a/staging/src/k8s.io/kubectl/pkg/explain/v2/templates/batch.k8s.io_v1.json b/staging/src/k8s.io/kubectl/pkg/explain/v2/templates/batch.k8s.io_v1.json
new file mode 100644
index 00000000000..a6b9b2eff50
--- /dev/null
+++ b/staging/src/k8s.io/kubectl/pkg/explain/v2/templates/batch.k8s.io_v1.json
@@ -0,0 +1,9106 @@
+{
+ "openapi": "3.0.0",
+ "info": {
+ "title": "Kubernetes",
+ "version": "v1.27.1"
+ },
+ "paths": {
+ "/apis/batch/v1/": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "get available resources",
+ "operationId": "getBatchV1APIResources",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.APIResourceList"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ }
+ }
+ },
+ "/apis/batch/v1/cronjobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "list or watch objects of kind CronJob",
+ "operationId": "listBatchV1CronJobForAllNamespaces",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+          "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+          "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+          "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+          "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+          "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+          "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/jobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "list or watch objects of kind Job",
+ "operationId": "listBatchV1JobForAllNamespaces",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+          "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+          "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+          "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+          "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+          "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+          "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/namespaces/{namespace}/cronjobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "list or watch objects of kind CronJob",
+ "operationId": "listBatchV1NamespacedCronJob",
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+            "description": "allowWatchBookmarks requests watch events with type \"BOOKMARK\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+            "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+            "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+            "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+            "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+            "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobList"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "post": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "create a CronJob",
+ "operationId": "createBatchV1NamespacedCronJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "delete": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "delete collection of CronJob",
+ "operationId": "deleteBatchV1CollectionNamespacedCronJob",
+ "parameters": [
+ {
+ "name": "continue",
+ "in": "query",
+            "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \"next key\".\n\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "gracePeriodSeconds",
+ "in": "query",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+            "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\n\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "orphanDependents",
+ "in": "query",
+            "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "propagationPolicy",
+ "in": "query",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+            "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+            "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\n\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+            "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \"Bookmark\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\"k8s.io/initial-events-end\": \"true\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\n\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\n is interpreted as \"data at least as new as the provided `resourceVersion`\"\n and the bookmark event is send when the state is synced\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\n If `resourceVersion` is unset, this is interpreted as \"consistent read\" and the\n bookmark event is send when the state is synced at least to the moment\n when request started being processed.\n- `resourceVersionMatch` set to any other value or unset\n Invalid error is returned.\n\nDefaults to true if `resourceVersion=\"\"` or `resourceVersion=\"0\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "read the specified CronJob",
+ "operationId": "readBatchV1NamespacedCronJob",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "put": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "replace the specified CronJob",
+ "operationId": "replaceBatchV1NamespacedCronJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "delete": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "delete a CronJob",
+ "operationId": "deleteBatchV1NamespacedCronJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "gracePeriodSeconds",
+ "in": "query",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "orphanDependents",
+ "in": "query",
+            "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \"orphan\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "propagationPolicy",
+ "in": "query",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "patch": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "partially update the specified CronJob",
+ "operationId": "patchBatchV1NamespacedCronJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "force",
+ "in": "query",
+            "description": "Force is going to \"force\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "application/apply-patch+yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/json-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/strategic-merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "name",
+ "in": "path",
+ "description": "name of the CronJob",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/namespaces/{namespace}/cronjobs/{name}/status": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "read status of the specified CronJob",
+ "operationId": "readBatchV1NamespacedCronJobStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "put": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "replace status of the specified CronJob",
+ "operationId": "replaceBatchV1NamespacedCronJobStatus",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "patch": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "partially update status of the specified CronJob",
+ "operationId": "patchBatchV1NamespacedCronJobStatus",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "force",
+ "in": "query",
+ "description": "Force is going to \\"force\\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "application/apply-patch+yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/json-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/strategic-merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "name",
+ "in": "path",
+ "description": "name of the CronJob",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/namespaces/{namespace}/jobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "list or watch objects of kind Job",
+ "operationId": "listBatchV1NamespacedJob",
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobList"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "list",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "post": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "create a Job",
+ "operationId": "createBatchV1NamespacedJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "post",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "delete": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "delete collection of Job",
+ "operationId": "deleteBatchV1CollectionNamespacedJob",
+ "parameters": [
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "gracePeriodSeconds",
+ "in": "query",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "orphanDependents",
+ "in": "query",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \\"orphan\\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "propagationPolicy",
+ "in": "query",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "deletecollection",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/namespaces/{namespace}/jobs/{name}": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "read the specified Job",
+ "operationId": "readBatchV1NamespacedJob",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "put": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "replace the specified Job",
+ "operationId": "replaceBatchV1NamespacedJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
|
94
|
Update the template.pot file with the latest message IDs.
Update the template.pot file with the current state of the messages in kubectl. That allows localized messages to be updated accordingly and picked up correctly.
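
The msgids in template.pot are extracted from the user-facing strings that kubectl wraps in i18n.T, so regenerating the template is what keeps translation catalogs aligned with the source. As a minimal sketch of the lookup side (illustrative, not part of this change; it assumes the standard k8s.io/kubectl/pkg/util/i18n package and its embedded "kubectl" catalog root):

package main

import (
	"fmt"

	"k8s.io/kubectl/pkg/util/i18n"
)

func main() {
	// Load the embedded kubectl catalog; with a nil getLanguageFn the
	// language is derived from the LANG/LC_* environment variables.
	if err := i18n.LoadTranslations("kubectl", nil); err != nil {
		panic(err)
	}
	// T returns the translated string for the msgid, falling back to the
	// msgid itself when the active locale has no entry for it.
	fmt.Println(i18n.T("Display one or many resources"))
}

Translation teams then fold the refreshed template into their per-language catalogs, typically with GNU gettext's msgmerge --update <lang>.po template.pot, which preserves existing translations and surfaces the new msgids as untranslated entries.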
|
2023-08-18
| null |
index 8406dc0db21..4e60cfc997c 100644
--- a/staging/src/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/template.pot
+++ b/staging/src/k8s.io/kubectl/pkg/util/i18n/translations/kubectl/template.pot
@@ -8,7 +8,7 @@ msgid ""
msgstr ""
"Project-Id-Version: \\n"
"Report-Msgid-Bugs-To: EMAIL\\n"
-"POT-Creation-Date: 2022-11-21 22:46-0500\\n"
+"POT-Creation-Date: 2023-06-27 12:09-0400\\n"
"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\\n"
"Last-Translator: FULL NAME <EMAIL@ADDRESS>\\n"
"Language-Team: LANGUAGE <[email protected]>\\n"
@@ -17,7 +17,7 @@ msgstr ""
"Content-Type: text/plain; charset=UTF-8\\n"
"Content-Transfer-Encoding: 8bit\\n"
-#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:141
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:142
msgid ""
"\\n"
"\\t\\t\\t# Approve CSR 'csr-sqgzp'\\n"
@@ -25,7 +25,7 @@ msgid ""
"\\t\\t"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:184
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:185
msgid ""
"\\n"
"\\t\\t\\t# Deny CSR 'csr-sqgzp'\\n"
@@ -37,7 +37,7 @@ msgstr ""
msgid ""
"\\n"
"\\t\\t\\tModify kubeconfig files using subcommands like \\"kubectl config set "
-"current-context my-context\\"\\n"
+"current-context my-context\\".\\n"
"\\n"
"\\t\\t\\tThe loading order follows these rules:\\n"
"\\n"
@@ -53,7 +53,7 @@ msgid ""
"admin --user=user1 --user=user2 --group=group1"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:58
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:57
msgid ""
"\\n"
"\\t\\t # Create a new config map named my-config based on folder bar\\n"
@@ -78,20 +78,11 @@ msgid ""
"from-env-file=path/to/bar.env"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:43
-msgid ""
-"\\n"
-"\\t\\t # Create a role binding for user1, user2, and group1 using the admin "
-"cluster role\\n"
-"\\t\\t kubectl create rolebinding admin --clusterrole=admin --user=user1 --"
-"user=user2 --group=group1"
-msgstr ""
-
#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:56
msgid ""
"\\n"
-"\\t\\t # If you don't already have a .dockercfg file, you can create a "
-"dockercfg secret directly by using:\\n"
+"\\t\\t # If you do not already have a .dockercfg file, create a dockercfg "
+"secret directly\\n"
"\\t\\t kubectl create secret docker-registry my-secret --docker-"
"server=DOCKER_REGISTRY_SERVER --docker-username=DOCKER_USER --docker-"
"password=DOCKER_PASSWORD --docker-email=DOCKER_EMAIL\\n"
@@ -111,7 +102,7 @@ msgid ""
"\\t\\t kubectl top node NODE_NAME"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:42
+#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:41
msgid ""
"\\n"
"\\t\\t# !!!Important Note!!!\\n"
@@ -146,7 +137,7 @@ msgid ""
"\\t\\tkubectl cp <some-namespace>/<some-pod>:/tmp/foo /tmp/bar"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:149
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:153
msgid ""
"\\n"
"\\t\\t# Apply the configuration in pod.json to a pod\\n"
@@ -159,8 +150,7 @@ msgid ""
"\\t\\t# Apply the JSON passed into stdin to a pod\\n"
"\\t\\tcat pod.json | kubectl apply -f -\\n"
"\\n"
-"\\t\\t# Apply the configuration from all files that end with '.json' - i.e. "
-"expand wildcard characters in file names\\n"
+"\\t\\t# Apply the configuration from all files that end with '.json'\\n"
"\\t\\tkubectl apply -f '*.json'\\n"
"\\n"
"\\t\\t# Note: --prune is still in Alpha\\n"
@@ -175,7 +165,7 @@ msgid ""
"ConfigMap"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:49
+#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:50
#, c-format
msgid ""
"\\n"
@@ -189,7 +179,7 @@ msgid ""
"\\t\\tkubectl autoscale rc foo --max=5 --cpu-percent=80"
msgstr ""
-#: pkg/kubectl/cmd/convert/convert.go:51
+#: pkg/kubectl/cmd/convert/convert.go:52
msgid ""
"\\n"
"\\t\\t# Convert 'pod.yaml' to latest version and print to stdout.\\n"
@@ -235,7 +225,7 @@ msgid ""
"com/aggregate-to-monitoring=true\\""
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:43
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:44
msgid ""
"\\n"
"\\t\\t# Create a job\\n"
@@ -277,7 +267,7 @@ msgid ""
"\\t\\tkubectl create pdb my-pdb --selector=app=nginx --min-available=50%"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:79
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:78
msgid ""
"\\n"
"\\t\\t# Create a pod using the data in pod.json\\n"
@@ -309,6 +299,20 @@ msgid ""
"description=\\"high priority\\" --preemption-policy=\\"Never\\""
msgstr ""
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:43
+msgid ""
+"\\n"
+"\\t\\t# Create a role binding for user1, user2, and group1 using the admin "
+"cluster role\\n"
+"\\t\\tkubectl create rolebinding admin --clusterrole=admin --user=user1 --"
+"user=user2 --group=group1\\n"
+"\\n"
+"\\t\\t# Create a role binding for serviceaccount monitoring:sa-dev using the "
+"admin role\\n"
+"\\t\\tkubectl create rolebinding admin-binding --role=admin --"
+"serviceaccount=monitoring:sa-dev"
+msgstr ""
+
#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:46
msgid ""
"\\n"
@@ -328,7 +332,7 @@ msgid ""
"\\t\\tkubectl create role foo --verb=get,list,watch --resource=pods,pods/status"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:66
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:67
msgid ""
"\\n"
"\\t\\t# Create a service for a replicated nginx, which serves on port 80 and "
@@ -368,7 +372,7 @@ msgid ""
"\\n"
"\\t\\t# Create a single ingress called 'simple' that directs requests to foo."
"com/bar to svc\\n"
-"\\t\\t# svc1:8080 with a tls secret \\"my-cert\\"\\n"
+"\\t\\t# svc1:8080 with a TLS secret \\"my-cert\\"\\n"
"\\t\\tkubectl create ingress simple --rule=\\"foo.com/bar=svc1:8080,tls=my-"
"cert\\"\\n"
"\\n"
@@ -413,13 +417,19 @@ msgid ""
"\\t\\t"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:76
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:78
msgid ""
"\\n"
"\\t\\t# Create an interactive debugging session in pod mypod and immediately "
"attach to it.\\n"
"\\t\\tkubectl debug mypod -it --image=busybox\\n"
"\\n"
+"\\t\\t# Create an interactive debugging session for the pod in the file pod."
+"yaml and immediately attach to it.\\n"
+"\\t\\t# (requires the EphemeralContainers feature to be enabled in the "
+"cluster)\\n"
+"\\t\\tkubectl debug -f pod.yaml -it --image=busybox\\n"
+"\\n"
"\\t\\t# Create a debug container named debugger using a custom automated "
"debugging image.\\n"
"\\t\\tkubectl debug --image=myproj/debug-tools -c debugger mypod\\n"
@@ -446,7 +456,7 @@ msgid ""
"\\t\\tkubectl debug node/mynode -it --image=busybox\\n"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:79
+#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:80
msgid ""
"\\n"
"\\t\\t# Delete a pod using the type and name specified in pod.json\\n"
@@ -456,8 +466,7 @@ msgid ""
"dir/kustomization.yaml\\n"
"\\t\\tkubectl delete -k dir\\n"
"\\n"
-"\\t\\t# Delete resources from all files that end with '.json' - i.e. expand "
-"wildcard characters in file names\\n"
+"\\t\\t# Delete resources from all files that end with '.json'\\n"
"\\t\\tkubectl delete -f '*.json'\\n"
"\\n"
"\\t\\t# Delete a pod based on the type and name in the JSON passed into stdin\\n"
@@ -495,14 +504,14 @@ msgid ""
"\\t\\tkubectl describe pods\\n"
"\\n"
"\\t\\t# Describe pods by label name=myLabel\\n"
-"\\t\\tkubectl describe po -l name=myLabel\\n"
+"\\t\\tkubectl describe pods -l name=myLabel\\n"
"\\n"
-"\\t\\t# Describe all pods managed by the 'frontend' replication controller \\n"
+"\\t\\t# Describe all pods managed by the 'frontend' replication controller\\n"
"\\t\\t# (rc-created pods get the name of the rc as a prefix in the pod name)\\n"
"\\t\\tkubectl describe pods frontend"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:76
+#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:75
msgid ""
"\\n"
"\\t\\t# Diff resources included in pod.json\\n"
@@ -512,20 +521,20 @@ msgid ""
"\\t\\tcat service.yaml | kubectl diff -f -"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:139
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:140
msgid ""
"\\n"
"\\t\\t# Drain node \\"foo\\", even if there are pods not managed by a "
-"replication controller, replica set, job, daemon set or stateful set on it\\n"
+"replication controller, replica set, job, daemon set, or stateful set on it\\n"
"\\t\\tkubectl drain foo --force\\n"
"\\n"
"\\t\\t# As above, but abort if there are pods not managed by a replication "
-"controller, replica set, job, daemon set or stateful set, and use a grace "
+"controller, replica set, job, daemon set, or stateful set, and use a grace "
"period of 15 minutes\\n"
"\\t\\tkubectl drain foo --grace-period=900"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:55
+#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:60
msgid ""
"\\n"
"\\t\\t# Edit the service named 'registry'\\n"
@@ -541,11 +550,11 @@ msgid ""
"config in its annotation\\n"
"\\t\\tkubectl edit deployment/mydeployment -o yaml --save-config\\n"
"\\n"
-"\\t\\t# Edit the deployment/mydeployment's status subresource\\n"
+"\\t\\t# Edit the 'status' subresource for the 'mydeployment' deployment\\n"
"\\t\\tkubectl edit deployment mydeployment --subresource='status'"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:46
+#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:47
msgid ""
"\\n"
"\\t\\t# Get output from running pod mypod; use the 'kubectl.kubernetes.io/"
@@ -567,7 +576,7 @@ msgid ""
"\\t\\t"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:48
+#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:50
msgid ""
"\\n"
"\\t\\t# Get output from running the 'date' command from pod mypod, using the "
@@ -605,14 +614,23 @@ msgid ""
"\\t\\t"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:49
+#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:47
msgid ""
"\\n"
"\\t\\t# Get the documentation of the resource and its fields\\n"
"\\t\\tkubectl explain pods\\n"
"\\n"
+"\\t\\t# Get all the fields in the resource\\n"
+"\\t\\tkubectl explain pods --recursive\\n"
+"\\n"
+"\\t\\t# Get the explanation for deployment in supported api versions\\n"
+"\\t\\tkubectl explain deployments --api-version=apps/v1\\n"
+"\\n"
"\\t\\t# Get the documentation of a specific field of a resource\\n"
-"\\t\\tkubectl explain pods.spec.containers"
+"\\t\\tkubectl explain pods.spec.containers\\n"
+"\\t\\t\\n"
+"\\t\\t# Get the documentation of resources in different format\\n"
+"\\t\\tkubectl explain deployment --output=plaintext-openapiv2"
msgstr ""
#: staging/src/k8s.io/kubectl/pkg/cmd/completion/completion.go:66
@@ -641,7 +659,7 @@ msgid ""
"bash_profile\\n"
"\\t\\t kubectl completion bash > ~/.kube/completion.bash.inc\\n"
"\\t\\t printf \\"\\n"
-"\\t\\t # Kubectl shell completion\\n"
+"\\t\\t # kubectl shell completion\\n"
"\\t\\t source '$HOME/.kube/completion.bash.inc'\\n"
"\\t\\t \\" >> $HOME/.bash_profile\\n"
"\\t\\t source $HOME/.bash_profile\\n"
@@ -654,7 +672,7 @@ msgid ""
"\\n"
"\\t\\t# Load the kubectl completion code for fish[2] into the current shell\\n"
"\\t\\t kubectl completion fish | source\\n"
-"\\t\\t# To load completions for each session, execute once: \\n"
+"\\t\\t# To load completions for each session, execute once:\\n"
"\\t\\t kubectl completion fish > ~/.config/fish/completions/kubectl.fish\\n"
"\\n"
"\\t\\t# Load the kubectl completion code for powershell into the current "
@@ -680,7 +698,7 @@ msgid ""
"\\t\\tkubectl plugin list"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:101
+#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:100
msgid ""
"\\n"
"\\t\\t# List all pods in ps output format\\n"
@@ -723,7 +741,7 @@ msgid ""
"\\t\\t# List one or more resources by their type and names\\n"
"\\t\\tkubectl get rc/web service/frontend pods/web-pod-13je7\\n"
"\\n"
-"\\t\\t# List status subresource for a single pod.\\n"
+"\\t\\t# List the 'status' subresource for a single pod\\n"
"\\t\\tkubectl get pod web-pod-13je7 --subresource status"
msgstr ""
@@ -757,14 +775,14 @@ msgid ""
"\\t\\tkubectl port-forward pod/mypod :5000"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:88
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:89
msgid ""
"\\n"
"\\t\\t# Mark node \\"foo\\" as schedulable\\n"
"\\t\\tkubectl uncordon foo"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:59
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:60
msgid ""
"\\n"
"\\t\\t# Mark node \\"foo\\" as unschedulable\\n"
@@ -795,9 +813,9 @@ msgid ""
"\\t\\t# Update a container's image using a JSON patch with positional arrays\\n"
"\\t\\tkubectl patch pod valid-pod --type='json' -p='[{\\"op\\": \\"replace\\", "
"\\"path\\": \\"/spec/containers/0/image\\", \\"value\\":\\"new image\\"}]'\\n"
-"\\t\\t\\n"
-"\\t\\t# Update a deployment's replicas through the scale subresource using a "
-"merge patch.\\n"
+"\\n"
+"\\t\\t# Update a deployment's replicas through the 'scale' subresource using a "
+"merge patch\\n"
"\\t\\tkubectl patch deployment nginx-deployment --subresource='scale' --"
"type='merge' -p '{\\"spec\\":{\\"replicas\\":2}}'"
msgstr ""
@@ -809,7 +827,7 @@ msgid ""
"\\t\\tkubectl options"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:44
+#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:45
msgid ""
"\\n"
"\\t\\t# Print the address of the control plane and cluster services\\n"
@@ -823,7 +841,7 @@ msgid ""
"\\t\\tkubectl version"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:34
+#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:35
msgid ""
"\\n"
"\\t\\t# Print the supported API versions\\n"
@@ -847,7 +865,7 @@ msgid ""
"\\t\\tkubectl replace --force -f ./pod.json"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:54
+#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:55
msgid ""
"\\n"
"\\t\\t# Return snapshot logs from pod nginx with only one container\\n"
@@ -888,7 +906,7 @@ msgid ""
"\\t\\tkubectl logs deployment/nginx -c nginx-1"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:49
+#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:50
msgid ""
"\\n"
"\\t\\t# Scale a replica set named 'foo' to 3\\n"
@@ -902,7 +920,7 @@ msgid ""
"\\t\\tkubectl scale --current-replicas=2 --replicas=3 deployment/mysql\\n"
"\\n"
"\\t\\t# Scale multiple replication controllers\\n"
-"\\t\\tkubectl scale --replicas=5 rc/foo rc/bar rc/baz\\n"
+"\\t\\tkubectl scale --replicas=5 rc/example1 rc/example2 rc/example3\\n"
"\\n"
"\\t\\t# Scale stateful set named 'web' to 3\\n"
"\\t\\tkubectl scale --replicas=3 statefulset/web"
@@ -941,7 +959,7 @@ msgid ""
"\\t\\tkubectl top pod -l name=myLabel"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:62
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:63
msgid ""
"\\n"
"\\t\\t# Start a nginx pod\\n"
@@ -980,7 +998,7 @@ msgid ""
"\\t\\tkubectl run nginx --image=nginx --command -- <cmd> <arg1> ... <argN>"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:76
+#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:78
msgid ""
"\\n"
"\\t\\t# To proxy all of the Kubernetes API and nothing else\\n"
@@ -1025,7 +1043,7 @@ msgid ""
"\\t\\t# Remove from node 'foo' all the taints with key 'dedicated'\\n"
"\\t\\tkubectl taint nodes foo dedicated-\\n"
"\\n"
-"\\t\\t# Add a taint with key 'dedicated' on nodes having label mylabel=X\\n"
+"\\t\\t# Add a taint with key 'dedicated' on nodes having label myLabel=X\\n"
"\\t\\tkubectl taint node -l myLabel=X dedicated=foo:PreferNoSchedule\\n"
"\\n"
"\\t\\t# Add to node 'foo' a taint with key 'bar' and no value\\n"
@@ -1066,7 +1084,7 @@ msgid ""
"\\t\\tkubectl apply view-last-applied -f deploy.yaml -o json"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:66
+#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:67
msgid ""
"\\n"
"\\t\\t# Wait for the pod \\"busybox1\\" to contain the status condition of type "
@@ -1075,11 +1093,11 @@ msgid ""
"\\n"
"\\t\\t# The default value of status condition is true; you can wait for other "
"targets after an equal delimiter (compared after Unicode simple case "
-"folding, which is a more general form of case-insensitivity):\\n"
+"folding, which is a more general form of case-insensitivity)\\n"
"\\t\\tkubectl wait --for=condition=Ready=false pod/busybox1\\n"
"\\n"
"\\t\\t# Wait for the pod \\"busybox1\\" to contain the status phase to be "
-"\\"Running\\".\\n"
+"\\"Running\\"\\n"
"\\t\\tkubectl wait --for=jsonpath='{.status.phase}'=Running pod/busybox1\\n"
"\\n"
"\\t\\t# Wait for the pod \\"busybox1\\" to be deleted, with a timeout of 60s, "
@@ -1088,7 +1106,7 @@ msgid ""
"\\t\\tkubectl wait --for=delete pod/busybox1 --timeout=60s"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:140
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:144
msgid ""
"\\n"
"\\t\\tApply a configuration to a resource by file name or stdin.\\n"
@@ -1104,7 +1122,7 @@ msgid ""
"k8s.io/34274."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:129
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:130
msgid ""
"\\n"
"\\t\\tApprove a certificate signing request.\\n"
@@ -1113,7 +1131,7 @@ msgid ""
"certificate\\n"
"\\t\\tsigning request (CSR). This action tells a certificate signing "
"controller to\\n"
-"\\t\\tissue a certificate to the requestor with the attributes requested in "
+"\\t\\tissue a certificate to the requester with the attributes requested in "
"the CSR.\\n"
"\\n"
"\\t\\tSECURITY NOTICE: Depending on the requested attributes, the issued "
@@ -1126,7 +1144,7 @@ msgid ""
"\\t\\t"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:28
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:30
msgid ""
"\\n"
"\\t\\tConfigure application resources.\\n"
@@ -1134,7 +1152,7 @@ msgid ""
"\\t\\tThese commands help you make changes to existing application resources."
msgstr ""
-#: pkg/kubectl/cmd/convert/convert.go:40
+#: pkg/kubectl/cmd/convert/convert.go:41
msgid ""
"\\n"
"\\t\\tConvert config files between different API versions. Both YAML\\n"
@@ -1173,7 +1191,7 @@ msgid ""
"\\t\\tCreate a cluster role."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:46
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:45
msgid ""
"\\n"
"\\t\\tCreate a config map based on a file, directory, or specified literal "
@@ -1193,13 +1211,13 @@ msgid ""
"\\t\\tsymlinks, devices, pipes, etc)."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go:40
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_cronjob.go:41
msgid ""
"\\n"
"\\t\\tCreate a cron job with the specified name."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:40
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_job.go:41
msgid ""
"\\n"
"\\t\\tCreate a job with the specified name."
@@ -1248,7 +1266,7 @@ msgid ""
"and description."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:74
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:73
msgid ""
"\\n"
"\\t\\tCreate a resource from a file or from stdin.\\n"
@@ -1275,7 +1293,7 @@ msgid ""
"\\t\\tCreate a role with single rule."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:61
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:70
msgid ""
"\\n"
"\\t\\tCreate a secret based on a file, directory, or specified literal value.\\n"
@@ -1295,13 +1313,25 @@ msgid ""
"\\t\\tsymlinks, devices, pipes, etc)."
msgstr ""
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:61
+msgid ""
+"\\n"
+"\\t\\tCreate a secret with specified type.\\n"
+"\\t\\t\\n"
+"\\t\\tA docker-registry type secret is for accessing a container registry.\\n"
+"\\n"
+"\\t\\tA generic type secret indicate an Opaque secret type.\\n"
+"\\n"
+"\\t\\tA tls type secret holds TLS certificate and its associated key."
+msgstr ""
+
#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:40
msgid ""
"\\n"
"\\t\\tCreate a service account with the specified name."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:70
+#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:72
msgid ""
"\\n"
"\\t\\tCreates a proxy server or application-level gateway between localhost "
@@ -1314,7 +1344,7 @@ msgid ""
"static content path."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:43
+#: staging/src/k8s.io/kubectl/pkg/cmd/autoscale/autoscale.go:44
msgid ""
"\\n"
"\\t\\tCreates an autoscaler that automatically chooses and sets the number of "
@@ -1327,7 +1357,7 @@ msgid ""
"deployed within the system as needed."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:59
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:61
msgid ""
"\\n"
"\\t\\tDebug cluster resources using interactive debugging containers.\\n"
@@ -1352,7 +1382,7 @@ msgid ""
"\\t\\t the node's filesystem.\\n"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:46
+#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:47
msgid ""
"\\n"
"\\t\\tDelete resources by file names, stdin, resources and names, or by "
@@ -1410,7 +1440,7 @@ msgid ""
"\\t\\tthe discovery cache."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:177
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:178
msgid ""
"\\n"
"\\t\\tDeny a certificate signing request.\\n"
@@ -1418,11 +1448,26 @@ msgid ""
"\\t\\tkubectl certificate deny allows a cluster admin to deny a certificate\\n"
"\\t\\tsigning request (CSR). This action tells a certificate signing "
"controller to\\n"
-"\\t\\tnot to issue a certificate to the requestor.\\n"
+"\\t\\tnot to issue a certificate to the requester.\\n"
"\\t\\t"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:53
+#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:37
+msgid ""
+"\\n"
+"\\t\\tDescribe fields and structure of various resources.\\n"
+"\\n"
+"\\t\\tThis command describes the fields associated with each supported API "
+"resource.\\n"
+"\\t\\tFields are identified via a simple JSONPath identifier:\\n"
+"\\n"
+"\\t\\t\\t<type>.<fieldName>[.<fieldName>]\\n"
+"\\n"
+"\\t\\tInformation about each field is retrieved from the server in OpenAPI "
+"format."
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/diff/diff.go:52
msgid ""
"\\n"
"\\t\\tDiff configurations specified by file name or stdin between the current "
@@ -1452,29 +1497,17 @@ msgid ""
"convention."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/top/top.go:39
-msgid ""
-"\\n"
-"\\t\\tDisplay Resource (CPU/Memory) usage.\\n"
-"\\n"
-"\\t\\tThe top command allows you to see the resource consumption for nodes or "
-"pods.\\n"
-"\\n"
-"\\t\\tThis command requires Metrics Server to be correctly configured and "
-"working on the server. "
-msgstr ""
-
-#: staging/src/k8s.io/kubectl/pkg/cmd/events/events.go:50
+#: staging/src/k8s.io/kubectl/pkg/cmd/events/events.go:51
msgid ""
"\\n"
-"\\t\\tDisplay events\\n"
+"\\t\\tDisplay events.\\n"
"\\n"
"\\t\\tPrints a table of the most important information about events.\\n"
"\\t\\tYou can request events for a namespace, for all namespace, or\\n"
"\\t\\tfiltered to only those pertaining to a specified resource."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:52
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:55
msgid ""
"\\n"
"\\t\\tDisplay merged kubeconfig settings or a specified kubeconfig file.\\n"
@@ -1483,7 +1516,7 @@ msgid ""
"jsonpath expression."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:90
+#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:89
msgid ""
"\\n"
"\\t\\tDisplay one or many resources.\\n"
@@ -1523,13 +1556,25 @@ msgid ""
"\\t\\tsince pod creation."
msgstr ""
+#: staging/src/k8s.io/kubectl/pkg/cmd/top/top.go:39
+msgid ""
+"\\n"
+"\\t\\tDisplay resource (CPU/memory) usage.\\n"
+"\\n"
+"\\t\\tThe top command allows you to see the resource consumption for nodes or "
+"pods.\\n"
+"\\n"
+"\\t\\tThis command requires Metrics Server to be correctly configured and "
+"working on the server. "
+msgstr ""
+
#: staging/src/k8s.io/kubectl/pkg/cmd/config/current_context.go:37
msgid ""
"\\n"
"\\t\\tDisplay the current-context."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:114
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:115
msgid ""
"\\n"
"\\t\\tDrain node in preparation for maintenance.\\n"
@@ -1568,7 +1613,7 @@ msgid ""
"\\t\\t"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:31
+#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:32
msgid ""
"\\n"
"\\t\\tEdit a resource from the default editor.\\n"
@@ -1579,6 +1624,13 @@ msgid ""
"or EDITOR\\n"
"\\t\\tenvironment variables, or fall back to 'vi' for Linux or 'notepad' for "
"Windows.\\n"
+"\\t\\tWhen attempting to open the editor, it will first attempt to use the "
+"shell\\n"
+"\\t\\tthat has been defined in the 'SHELL' environment variable. If this is "
+"not defined,\\n"
+"\\t\\tthe default shell will be used, which is '/bin/bash' for Linux or 'cmd' "
+"for Windows.\\n"
+"\\n"
"\\t\\tYou can edit multiple objects, although changes are applied one at a "
"time. The command\\n"
"\\t\\taccepts file names as well as command-line arguments, although the files "
@@ -1641,7 +1693,7 @@ msgid ""
"\\t\\tsaved copy to include the latest resource version."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:54
+#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:55
msgid ""
"\\n"
"\\t\\tExperimental: Wait for a specific condition on one or many resources.\\n"
@@ -1660,7 +1712,7 @@ msgid ""
"destination."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:52
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:53
msgid ""
"\\n"
"\\t\\tExpose a resource as a new Kubernetes service.\\n"
@@ -1694,42 +1746,25 @@ msgid ""
"\\t\\t- begin with \\"kubectl-\\"\\n"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:38
-msgid ""
-"\\n"
-"\\t\\tList the fields for supported resources.\\n"
-"\\n"
-"\\t\\tThis command describes the fields associated with each supported API "
-"resource.\\n"
-"\\t\\tFields are identified via a simple JSONPath identifier:\\n"
-"\\n"
-"\\t\\t\\t<type>.<fieldName>[.<fieldName>]\\n"
-"\\n"
-"\\t\\tAdd the --recursive flag to display all of the fields at once without "
-"descriptions.\\n"
-"\\t\\tInformation about each field is retrieved from the server in OpenAPI "
-"format."
-msgstr ""
-
-#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:30
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:31
msgid ""
"\\n"
"\\t\\tManage the rollout of one or many resources."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:85
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:86
msgid ""
"\\n"
"\\t\\tMark node as schedulable."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:56
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:57
msgid ""
"\\n"
"\\t\\tMark node as unschedulable."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:58
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:59
msgid ""
"\\n"
"\\t\\tMark the provided resource as paused.\\n"
@@ -1766,7 +1801,7 @@ msgid ""
"of zsh >= 5.2."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:50
+#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:51
msgid ""
"\\n"
"\\t\\tPrint the logs for a container in a pod or specified resource. \\n"
@@ -1814,7 +1849,7 @@ msgid ""
"\\t\\t $ kubectl get TYPE NAME -o yaml"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:58
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:60
msgid ""
"\\n"
"\\t\\tRestart a resource.\\n"
@@ -1822,7 +1857,7 @@ msgid ""
"\\t Resource rollout will be restarted."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:59
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:60
msgid ""
"\\n"
"\\t\\tResume a paused resource.\\n"
@@ -1838,7 +1873,7 @@ msgid ""
"\\t\\tRoll back to a previous rollout."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_cluster.go:48
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_cluster.go:47
msgid ""
"\\n"
"\\t\\tSet a cluster entry in kubeconfig.\\n"
@@ -1856,7 +1891,7 @@ msgid ""
"existing values for those fields."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:40
+#: staging/src/k8s.io/kubectl/pkg/cmd/scale/scale.go:41
msgid ""
"\\n"
"\\t\\tSet a new size for a deployment, replica set, replication controller, or "
@@ -1872,7 +1907,7 @@ msgid ""
"\\t\\tscale is sent to the server."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_credentials.go:70
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_credentials.go:69
#, c-format
msgid ""
"\\n"
@@ -1937,7 +1972,7 @@ msgid ""
"prefixed with NAME_PREFIX."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:47
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:48
msgid ""
"\\n"
"\\t\\tShow the status of the rollout.\\n"
@@ -1954,7 +1989,7 @@ msgid ""
"\\t\\tuse --revision=N where N is the revision you need to watch for."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:41
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_resources.go:42
#, c-format
msgid ""
"\\n"
@@ -1969,7 +2004,7 @@ msgid ""
"\\t\\tPossible resources include (case insensitive): %s."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go:51
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_env.go:52
msgid ""
"\\n"
"\\t\\tUpdate environment variables on a pod template.\\n"
@@ -2012,7 +2047,7 @@ msgid ""
"\\t\\tNote: Strategic merge patch is not supported for custom resources."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:83
+#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:112
msgid ""
"\\n"
"\\t\\tUpdate the annotations on one or more resources.\\n"
@@ -2068,7 +2103,7 @@ msgid ""
"\\t\\t* Currently taint can only apply to node."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:37
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:38
msgid ""
"\\n"
"\\t\\tView previous rollout revisions and configurations."
@@ -2100,7 +2135,7 @@ msgid ""
"\\t kubectl create namespace my-namespace"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:74
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:83
msgid ""
"\\n"
"\\t # Create a new secret named my-secret with keys for each file in folder "
@@ -2152,35 +2187,34 @@ msgid ""
"\\tkubectl create deployment my-dep --image=busybox --port=5701"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:358
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:352
msgid ""
"\\n"
"\\t# Create a new ExternalName service named my-ns\\n"
"\\tkubectl create service externalname my-ns --external-name bar.com"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/events/events.go:57
+#: staging/src/k8s.io/kubectl/pkg/cmd/events/events.go:58
msgid ""
"\\n"
-"\\t# List recent events in the default namespace.\\n"
+"\\t# List recent events in the default namespace\\n"
"\\tkubectl events\\n"
"\\n"
-"\\t# List recent events in all namespaces.\\n"
+"\\t# List recent events in all namespaces\\n"
"\\tkubectl events --all-namespaces\\n"
"\\n"
"\\t# List recent events for the specified pod, then wait for more events and "
-"list them as they arrive.\\n"
+"list them as they arrive\\n"
"\\tkubectl events --for pod/web-pod-13je7 --watch\\n"
"\\n"
-"\\t# List recent events in given format. Supported ones, apart from default, "
-"are json and yaml.\\n"
+"\\t# List recent events in YAML format\\n"
"\\tkubectl events -oyaml\\n"
"\\n"
-"\\t# List recent only events in given event types\\n"
+"\\t# List recent only events of type 'Warning' or 'Normal'\\n"
"\\tkubectl events --types=Warning,Normal"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:50
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:51
msgid ""
"\\n"
"\\t# Set deployment nginx-deployment's service account to serviceaccount1\\n"
@@ -2199,7 +2233,7 @@ msgid ""
"\\tCreate a deployment with the specified name."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:351
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:345
msgid ""
"\\n"
"\\tCreate an ExternalName service with the specified name.\\n"
@@ -2247,7 +2281,7 @@ msgid ""
"an attribute name or a map key. Map keys may not contain dots."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:43
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:44
msgid ""
"\\n"
"\\tUpdate the service account of pod template resources.\\n"
@@ -2257,7 +2291,7 @@ msgid ""
"\\t"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:40
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_subject.go:41
msgid ""
"\\n"
"\\tUpdate the user, group, or service account in a role binding or cluster "
@@ -2286,7 +2320,7 @@ msgid ""
" to resume forwarding."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:240
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:234
msgid ""
"\\n"
" # Create a new ClusterIP service named my-cs\\n"
@@ -2296,21 +2330,21 @@ msgid ""
" kubectl create service clusterip my-cs --clusterip=\\"None\\""
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:318
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:312
msgid ""
"\\n"
" # Create a new LoadBalancer service named my-lbs\\n"
" kubectl create service loadbalancer my-lbs --tcp=5678:8080"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:281
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:275
msgid ""
"\\n"
" # Create a new NodePort service named my-ns\\n"
" kubectl create service nodeport my-ns --tcp=5678:8080"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:102
+#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:103
msgid ""
"\\n"
" # Dump current cluster state to stdout\\n"
@@ -2327,7 +2361,7 @@ msgid ""
"directory=/path/to/cluster-state"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:95
+#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:124
msgid ""
"\\n"
" # Update pod 'foo' with the annotation 'description' and the value 'my "
@@ -2357,25 +2391,25 @@ msgid ""
" kubectl annotate pods foo description-"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:237
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:231
msgid ""
"\\n"
" Create a ClusterIP service with the specified name."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:315
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:309
msgid ""
"\\n"
" Create a LoadBalancer service with the specified name."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:278
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:272
msgid ""
"\\n"
" Create a NodePort service with the specified name."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:93
+#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:94
msgid ""
"\\n"
" Dump cluster information out suitable for debugging and diagnosing "
@@ -2392,7 +2426,7 @@ msgid ""
" based on namespace and pod name."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:40
+#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:41
msgid ""
"\\n"
" Display addresses of the control plane and services with label kubernetes."
@@ -2422,18 +2456,18 @@ msgstr ""
msgid " is used and no merging takes place."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:108
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107
msgid ""
"A comma-delimited set of quota scopes that must all match each object "
"tracked by the quota."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:107
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:106
msgid ""
"A comma-delimited set of resource=quantity pairs that define a hard limit."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:114
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113
msgid ""
"A label selector to use for this budget. Only equality-based selector "
"requirements are supported."
@@ -2453,7 +2487,7 @@ msgid ""
"IP in addition to its generated service IP."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:184
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:193
msgid "Allocate a TTY for the debugging container."
msgstr ""
@@ -2468,26 +2502,26 @@ msgstr ""
msgid "Annotations to apply to the pod."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:198
+#: staging/src/k8s.io/kubectl/pkg/cmd/apply/apply.go:203
msgid "Apply a configuration to a resource by file name or stdin"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:128
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:129
msgid "Approve a certificate signing request"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:270
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:264
msgid ""
"Assign your own ClusterIP or set to 'None' for a 'headless' service (no "
"loadbalancing)."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:108
+#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:109
msgid ""
"Attach to a process that is already running inside an existing container."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:107
+#: staging/src/k8s.io/kubectl/pkg/cmd/attach/attach.go:108
msgid "Attach to a running container"
msgstr ""
@@ -2502,11 +2536,11 @@ msgid ""
"set to 'None' to create a headless service."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:102
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:101
msgid "ClusterRole this ClusterRoleBinding should reference"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:105
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:107
msgid "ClusterRole this RoleBinding should reference"
msgstr ""
@@ -2514,39 +2548,39 @@ msgstr ""
msgid "Commands for features in alpha"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:176
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:185
msgid "Container image to use for debug container."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:172
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:181
msgid "Container name to use for debug container."
msgstr ""
-#: pkg/kubectl/cmd/convert/convert.go:95
+#: pkg/kubectl/cmd/convert/convert.go:96
msgid "Convert config files between different API versions"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:99
+#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:98
msgid "Copy files and directories to and from containers"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:100
+#: staging/src/k8s.io/kubectl/pkg/cmd/cp/cp.go:99
msgid "Copy files and directories to and from containers."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:255
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:249
msgid "Create a ClusterIP service"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:330
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:324
msgid "Create a LoadBalancer service"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:293
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:287
msgid "Create a NodePort service"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:95
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:94
msgid "Create a TLS secret"
msgstr ""
@@ -2554,15 +2588,15 @@ msgstr ""
msgid "Create a cluster role"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:88
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_clusterrolebinding.go:87
msgid "Create a cluster role binding for a particular cluster role"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:123
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_configmap.go:121
msgid "Create a config map from a local file, directory or literal value"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:173
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:182
msgid "Create a copy of the target Pod with this name."
msgstr ""
@@ -2570,7 +2604,7 @@ msgstr ""
msgid "Create a cron job with the specified name"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:101
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_deployment.go:100
msgid "Create a deployment with the specified name"
msgstr ""
@@ -2578,51 +2612,47 @@ msgstr ""
msgid "Create a job with the specified name"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:85
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_namespace.go:84
msgid "Create a namespace with the specified name"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:96
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:95
msgid "Create a pod disruption budget with the specified name"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:93
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:92
msgid "Create a priority class with the specified name"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:92
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_quota.go:91
msgid "Create a quota with the specified name"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:109
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create.go:108
msgid "Create a resource from a file or from stdin"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:90
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:92
msgid "Create a role binding for a particular role or cluster role"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:172
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_role.go:171
msgid "Create a role with single rule"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:135
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:134
msgid "Create a secret for use with a Docker registry"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:138
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:146
msgid "Create a secret from a local file, directory, or literal value"
msgstr ""
#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:49
-msgid "Create a secret using specified subcommand"
-msgstr ""
-
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:50
-msgid "Create a secret using specified subcommand."
+msgid "Create a secret using a specified subcommand"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:86
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_serviceaccount.go:85
msgid "Create a service account with the specified name"
msgstr ""
@@ -2634,24 +2664,26 @@ msgstr ""
msgid "Create a service using a specified subcommand."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:370
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:364
msgid "Create an ExternalName service"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:146
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_ingress.go:145
msgid "Create an ingress with the specified name"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:60
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run.go:61
msgid "Create and run a particular image in a pod."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:155
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:162
msgid "Create debugging sessions for troubleshooting workloads and nodes"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:185
-msgid "Debugging profile."
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:194
+msgid ""
+"Debugging profile. Options are \\"legacy\\", \\"general\\", \\"baseline\\", "
+"\\"netadmin\\", or \\"restricted\\"."
msgstr ""
#: staging/src/k8s.io/kubectl/pkg/cmd/delete/delete.go:146
@@ -2684,7 +2716,7 @@ msgstr ""
msgid "Delete the specified user from the kubeconfig."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:176
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:177
msgid "Deny a certificate signing request"
msgstr ""
@@ -2696,7 +2728,7 @@ msgstr ""
msgid "Diff the live version against a would-be applied version"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:65
+#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo.go:66
msgid "Display cluster information"
msgstr ""
@@ -2708,7 +2740,7 @@ msgstr ""
msgid "Display clusters defined in the kubeconfig."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:79
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/view.go:82
msgid "Display merged kubeconfig settings or a specified kubeconfig file"
msgstr ""
@@ -2716,7 +2748,7 @@ msgstr ""
msgid "Display one or many contexts from the kubeconfig file."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:166
+#: staging/src/k8s.io/kubectl/pkg/cmd/get/get.go:164
msgid "Display one or many resources"
msgstr ""
@@ -2744,15 +2776,15 @@ msgstr ""
msgid "Display users defined in the kubeconfig."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:185
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:186
msgid "Drain node in preparation for maintenance"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:74
+#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:75
msgid "Dump relevant information for debugging and diagnosis"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:78
+#: staging/src/k8s.io/kubectl/pkg/cmd/edit/edit.go:83
msgid "Edit a resource on the server"
msgstr ""
@@ -2760,27 +2792,27 @@ msgstr ""
msgid "Edit latest last-applied-configuration annotations of a resource/object"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152
msgid "Email for Docker registry"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:175
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:184
msgid "Environment variables to set in the container."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:89
+#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:91
msgid "Execute a command in a container"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:90
+#: staging/src/k8s.io/kubectl/pkg/cmd/exec/exec.go:92
msgid "Execute a command in a container."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:123
+#: staging/src/k8s.io/kubectl/pkg/cmd/wait/wait.go:124
msgid "Experimental: Wait for a specific condition on one or many resources"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:385
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_service.go:379
msgid "External name of service"
msgstr ""
@@ -2788,7 +2820,7 @@ msgstr ""
msgid "Forward one or more local ports to a pod"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:97
+#: staging/src/k8s.io/kubectl/pkg/cmd/explain/explain.go:102
msgid "Get documentation for a resource"
msgstr ""
@@ -2808,7 +2840,7 @@ msgid ""
"values: 'None', 'ClientIP'"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:156
+#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:187
msgid ""
"If non-empty, the annotation update will only succeed if this is the current "
"resource-version for the object. Only valid when specifying a single "
@@ -2822,7 +2854,7 @@ msgid ""
"resource."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:170
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:179
msgid ""
"If specified, everything after -- will be passed to the new container as "
"Args instead of Command."
@@ -2832,18 +2864,18 @@ msgstr ""
msgid "If true, run the container in privileged mode."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:180
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:189
msgid "If true, suppress informational messages."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:171
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:180
msgid ""
"If true, wait for the container to start running, and then attach as if "
"'kubectl attach ...' were called. Default false, unless '-i/--stdin' is "
"set, in which case the default is true."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:179
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:188
msgid ""
"Keep stdin open on the container(s) in the pod, even if nothing is attached."
msgstr ""
@@ -2852,28 +2884,31 @@ msgstr ""
msgid "List all visible plugin executables on a user's PATH"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/events/events.go:125
+#: staging/src/k8s.io/kubectl/pkg/cmd/events/events.go:126
msgid "List events"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:60
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout.go:61
msgid "Manage the rollout of a resource"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:99
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:100
msgid "Mark node as schedulable"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:70
+#: staging/src/k8s.io/kubectl/pkg/cmd/drain/drain.go:71
msgid "Mark node as unschedulable"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:84
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_pause.go:85
msgid "Mark the provided resource as paused"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:47
#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:48
+msgid "Modify certificate resources"
+msgstr ""
+
+#: staging/src/k8s.io/kubectl/pkg/cmd/certificates/certificates.go:49
msgid "Modify certificate resources."
msgstr ""
@@ -2887,11 +2922,11 @@ msgid ""
"traffic to. Optional."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:53
+#: staging/src/k8s.io/kubectl/pkg/cmd/alpha.go:41
msgid "No alpha commands are available in this version of kubectl"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:175
+#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:176
msgid ""
"Only return logs after a specific date (RFC3339). Defaults to all logs. Only "
"one of since-time / since may be used."
@@ -2903,21 +2938,21 @@ msgid ""
"powershell)"
msgstr ""
-#: pkg/kubectl/cmd/convert/convert.go:105
+#: pkg/kubectl/cmd/convert/convert.go:106
msgid ""
"Output the formatted object with the given group version (for ex: "
"'extensions/v1beta1')."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:152
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151
msgid "Password for Docker registry authentication"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:110
msgid "Path to PEM encoded public key certificate."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:112
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_tls.go:111
msgid "Path to private key associated with given certificate."
msgstr ""
@@ -2941,25 +2976,25 @@ msgstr ""
msgid "Print the list of flags inherited by all commands"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:153
+#: staging/src/k8s.io/kubectl/pkg/cmd/logs/logs.go:154
msgid "Print the logs for a container in a pod"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:101
+#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:102
msgid "Print the supported API resources on the server"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:102
+#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiresources.go:103
msgid "Print the supported API resources on the server."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:58
+#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:59
msgid ""
"Print the supported API versions on the server, in the form of \\"group/"
"version\\""
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:59
+#: staging/src/k8s.io/kubectl/pkg/cmd/apiresources/apiversions.go:60
msgid ""
"Print the supported API versions on the server, in the form of \\"group/"
"version\\"."
@@ -2973,19 +3008,19 @@ msgstr ""
msgid "Rename a context from the kubeconfig file"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:121
+#: staging/src/k8s.io/kubectl/pkg/cmd/replace/replace.go:119
msgid "Replace a resource by file name or stdin"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:91
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_restart.go:93
msgid "Restart a resource"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:88
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_resume.go:89
msgid "Resume a paused resource"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:106
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_rolebinding.go:108
msgid "Role this RoleBinding should reference"
msgstr ""
@@ -2993,15 +3028,15 @@ msgstr ""
msgid "Run a particular image on the cluster"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:122
+#: staging/src/k8s.io/kubectl/pkg/cmd/proxy/proxy.go:124
msgid "Run a proxy to the Kubernetes API server"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:154
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:153
msgid "Server location for Docker registry"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_cluster.go:77
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_cluster.go:76
msgid "Set a cluster entry in kubeconfig"
msgstr ""
@@ -3013,7 +3048,7 @@ msgstr ""
msgid "Set a new size for a deployment, replica set, or replication controller"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_credentials.go:158
+#: staging/src/k8s.io/kubectl/pkg/cmd/config/set_credentials.go:157
msgid "Set a user entry in kubeconfig"
msgstr ""
@@ -3021,7 +3056,7 @@ msgstr ""
msgid "Set an individual value in a kubeconfig file"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:39
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set.go:41
msgid "Set specific features on objects"
msgstr ""
@@ -3043,7 +3078,7 @@ msgstr ""
msgid "Show details of a specific resource or group of resources"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:103
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_status.go:104
msgid "Show the status of the rollout"
msgstr ""
@@ -3063,18 +3098,18 @@ msgid ""
"be specified by the client and defaulted by the server."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:178
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:187
msgid ""
"The image pull policy for the container. If left empty, this value will not "
"be specified by the client and defaulted by the server."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:113
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:112
msgid ""
"The maximum number or percentage of unavailable pods this budget requires."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:112
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_pdb.go:111
msgid ""
"The minimum number or percentage of available pods this budget requires."
msgstr ""
@@ -3108,7 +3143,7 @@ msgid ""
"The restart policy for this Pod. Legal values [Always, OnFailure, Never]."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:156
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret.go:164
msgid "The type of secret to create"
msgstr ""
@@ -3144,7 +3179,7 @@ msgstr ""
msgid "Update resource requests/limits on objects with pod templates"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:135
+#: staging/src/k8s.io/kubectl/pkg/cmd/annotate/annotate.go:153
msgid "Update the annotations on a resource"
msgstr ""
@@ -3170,11 +3205,11 @@ msgid ""
"binding"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:151
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_secret_docker.go:150
msgid "Username for Docker registry authentication"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:85
+#: staging/src/k8s.io/kubectl/pkg/cmd/rollout/rollout_history.go:86
msgid "View rollout history"
msgstr ""
@@ -3183,74 +3218,74 @@ msgid ""
"View the latest last-applied-configuration annotations of a resource/object"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:177
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:186
msgid ""
"When used with '--copy-to', a list of name=image pairs for changing "
"container images, similar to how 'kubectl set image' works."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:174
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:183
msgid "When used with '--copy-to', delete the original Pod."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:182
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:191
msgid ""
"When used with '--copy-to', enable process namespace sharing in the copy."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:181
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:190
msgid ""
"When used with '--copy-to', schedule the copy of target Pod on the same node."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:183
+#: staging/src/k8s.io/kubectl/pkg/cmd/debug/debug.go:192
msgid ""
"When using an ephemeral container, target processes in this container name."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:85
+#: staging/src/k8s.io/kubectl/pkg/cmd/clusterinfo/clusterinfo_dump.go:86
msgid ""
"Where to output the files. If empty or '-' uses stdout, otherwise creates a "
"directory hierarchy in that directory"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:109
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:108
msgid ""
"description is an arbitrary string that usually provides guidelines on when "
"this priority class should be used."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:88
+#: staging/src/k8s.io/kubectl/pkg/cmd/run/run_test.go:89
msgid "dummy restart flag)"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:108
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:107
msgid ""
"global-default specifies whether this PriorityClass should be considered as "
"the default priority."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:274
+#: staging/src/k8s.io/kubectl/pkg/cmd/cmd.go:317
msgid "kubectl controls the Kubernetes cluster manager"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:50
+#: staging/src/k8s.io/kubectl/pkg/cmd/expose/expose.go:51
msgid ""
"pod (po), service (svc), replicationcontroller (rc), deployment (deploy), "
"replicaset (rs)"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:110
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:109
msgid ""
"preemption-policy is the policy for preempting pods with lower priority."
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:41
+#: staging/src/k8s.io/kubectl/pkg/cmd/set/set_serviceaccount.go:42
msgid ""
"replicationcontroller (rc), deployment (deploy), daemonset (ds), job, "
"replicaset (rs), statefulset"
msgstr ""
-#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:107
+#: staging/src/k8s.io/kubectl/pkg/cmd/create/create_priorityclass.go:106
msgid "the value of this priority class."
msgstr ""
|
98
|
Skip terminal Pods with a deletion timestamp from the DaemonSet sync (#118716)
* Skip terminal Pods with a deletion timestamp from the DaemonSet sync
Change-Id: I64a347a87c02ee2bd48be10e6fff380c8c81f742
* Review comments and fix integration test
Change-Id: I3eb5ec62bce8b4b150726a1e9b2b517c4e993713
* Include deleted terminal pods in history
Change-Id: I8b921157e6be1c809dd59f8035ec259ea4d96301
|
2023-08-18
| null |
index d8a2f67dcf8..ffcebcb17db 100644
--- a/pkg/controller/daemon/daemon_controller.go
+++ b/pkg/controller/daemon/daemon_controller.go
@@ -752,7 +752,7 @@ func (dsc *DaemonSetsController) getDaemonPods(ctx context.Context, ds *apps.Dae
// This also reconciles ControllerRef by adopting/orphaning.
// Note that returned Pods are pointers to objects in the cache.
// If you want to modify one, you need to deep-copy it first.
-func (dsc *DaemonSetsController) getNodesToDaemonPods(ctx context.Context, ds *apps.DaemonSet) (map[string][]*v1.Pod, error) {
+func (dsc *DaemonSetsController) getNodesToDaemonPods(ctx context.Context, ds *apps.DaemonSet, includeDeletedTerminal bool) (map[string][]*v1.Pod, error) {
claimedPods, err := dsc.getDaemonPods(ctx, ds)
if err != nil {
return nil, err
@@ -761,6 +761,12 @@ func (dsc *DaemonSetsController) getNodesToDaemonPods(ctx context.Context, ds *a
nodeToDaemonPods := make(map[string][]*v1.Pod)
logger := klog.FromContext(ctx)
for _, pod := range claimedPods {
+ if !includeDeletedTerminal && podutil.IsPodTerminal(pod) && pod.DeletionTimestamp != nil {
+ // This Pod has a finalizer or is already scheduled for deletion from the
+ // store by the kubelet or the Pod GC. The DS controller doesn't have
+ // anything else to do with it.
+ continue
+ }
nodeName, err := util.GetTargetNodeName(pod)
if err != nil {
logger.Info("Failed to get target node name of Pod in DaemonSet",
@@ -953,7 +959,7 @@ func (dsc *DaemonSetsController) updateDaemonSet(ctx context.Context, ds *apps.D
// syncNodes with a list of pods to remove and a list of nodes to run a Pod of ds.
func (dsc *DaemonSetsController) manage(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
// Find out the pods which are created for the nodes by DaemonSet.
- nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds)
+ nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds, false)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
@@ -1154,7 +1160,7 @@ func storeDaemonSetStatus(
func (dsc *DaemonSetsController) updateDaemonSetStatus(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, hash string, updateObservedGen bool) error {
logger := klog.FromContext(ctx)
logger.V(4).Info("Updating daemon set status")
- nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds)
+ nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds, false)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
diff --git a/pkg/controller/daemon/daemon_controller_test.go b/pkg/controller/daemon/daemon_controller_test.go
index 72646863cd3..bfaff5b6316 100644
--- a/pkg/controller/daemon/daemon_controller_test.go
+++ b/pkg/controller/daemon/daemon_controller_test.go
@@ -2739,75 +2739,128 @@ func TestDeleteUnscheduledPodForNotExistingNode(t *testing.T) {
}
func TestGetNodesToDaemonPods(t *testing.T) {
- for _, strategy := range updateStrategies() {
- ds := newDaemonSet("foo")
- ds.Spec.UpdateStrategy = *strategy
- ds2 := newDaemonSet("foo2")
- ds2.Spec.UpdateStrategy = *strategy
- _, ctx := ktesting.NewTestContext(t)
- manager, _, _, err := newTestController(ctx, ds, ds2)
- if err != nil {
- t.Fatalf("error creating DaemonSets controller: %v", err)
- }
- err = manager.dsStore.Add(ds)
- if err != nil {
- t.Fatal(err)
- }
- err = manager.dsStore.Add(ds2)
- if err != nil {
- t.Fatal(err)
- }
- addNodes(manager.nodeStore, 0, 2, nil)
-
- // These pods should be returned.
- wantedPods := []*v1.Pod{
- newPod("matching-owned-0-", "node-0", simpleDaemonSetLabel, ds),
- newPod("matching-orphan-0-", "node-0", simpleDaemonSetLabel, nil),
- newPod("matching-owned-1-", "node-1", simpleDaemonSetLabel, ds),
- newPod("matching-orphan-1-", "node-1", simpleDaemonSetLabel, nil),
- }
- failedPod := newPod("matching-owned-failed-pod-1-", "node-1", simpleDaemonSetLabel, ds)
- failedPod.Status = v1.PodStatus{Phase: v1.PodFailed}
- wantedPods = append(wantedPods, failedPod)
- for _, pod := range wantedPods {
- manager.podStore.Add(pod)
- }
-
- // These pods should be ignored.
- ignoredPods := []*v1.Pod{
- newPod("non-matching-owned-0-", "node-0", simpleDaemonSetLabel2, ds),
- newPod("non-matching-orphan-1-", "node-1", simpleDaemonSetLabel2, nil),
- newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2),
- }
- for _, pod := range ignoredPods {
- err = manager.podStore.Add(pod)
+ ds := newDaemonSet("foo")
+ ds2 := newDaemonSet("foo2")
+ cases := map[string]struct {
+ includeDeletedTerminal bool
+ wantedPods []*v1.Pod
+ ignoredPods []*v1.Pod
+ }{
+ "exclude deleted terminal pods": {
+ wantedPods: []*v1.Pod{
+ newPod("matching-owned-0-", "node-0", simpleDaemonSetLabel, ds),
+ newPod("matching-orphan-0-", "node-0", simpleDaemonSetLabel, nil),
+ newPod("matching-owned-1-", "node-1", simpleDaemonSetLabel, ds),
+ newPod("matching-orphan-1-", "node-1", simpleDaemonSetLabel, nil),
+ func() *v1.Pod {
+ pod := newPod("matching-owned-succeeded-pod-0-", "node-0", simpleDaemonSetLabel, ds)
+ pod.Status = v1.PodStatus{Phase: v1.PodSucceeded}
+ return pod
+ }(),
+ func() *v1.Pod {
+ pod := newPod("matching-owned-failed-pod-1-", "node-1", simpleDaemonSetLabel, ds)
+ pod.Status = v1.PodStatus{Phase: v1.PodFailed}
+ return pod
+ }(),
+ },
+ ignoredPods: []*v1.Pod{
+ newPod("non-matching-owned-0-", "node-0", simpleDaemonSetLabel2, ds),
+ newPod("non-matching-orphan-1-", "node-1", simpleDaemonSetLabel2, nil),
+ newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2),
+ func() *v1.Pod {
+ pod := newPod("matching-owned-succeeded-deleted-pod-0-", "node-0", simpleDaemonSetLabel, ds)
+ now := metav1.Now()
+ pod.DeletionTimestamp = &now
+ pod.Status = v1.PodStatus{Phase: v1.PodSucceeded}
+ return pod
+ }(),
+ func() *v1.Pod {
+ pod := newPod("matching-owned-failed-deleted-pod-1-", "node-1", simpleDaemonSetLabel, ds)
+ now := metav1.Now()
+ pod.DeletionTimestamp = &now
+ pod.Status = v1.PodStatus{Phase: v1.PodFailed}
+ return pod
+ }(),
+ },
+ },
+ "include deleted terminal pods": {
+ includeDeletedTerminal: true,
+ wantedPods: []*v1.Pod{
+ newPod("matching-owned-0-", "node-0", simpleDaemonSetLabel, ds),
+ newPod("matching-orphan-0-", "node-0", simpleDaemonSetLabel, nil),
+ newPod("matching-owned-1-", "node-1", simpleDaemonSetLabel, ds),
+ newPod("matching-orphan-1-", "node-1", simpleDaemonSetLabel, nil),
+ func() *v1.Pod {
+ pod := newPod("matching-owned-succeeded-pod-0-", "node-0", simpleDaemonSetLabel, ds)
+ pod.Status = v1.PodStatus{Phase: v1.PodSucceeded}
+ return pod
+ }(),
+ func() *v1.Pod {
+ pod := newPod("matching-owned-failed-deleted-pod-1-", "node-1", simpleDaemonSetLabel, ds)
+ now := metav1.Now()
+ pod.DeletionTimestamp = &now
+ pod.Status = v1.PodStatus{Phase: v1.PodFailed}
+ return pod
+ }(),
+ },
+ ignoredPods: []*v1.Pod{
+ newPod("non-matching-owned-0-", "node-0", simpleDaemonSetLabel2, ds),
+ newPod("non-matching-orphan-1-", "node-1", simpleDaemonSetLabel2, nil),
+ newPod("matching-owned-by-other-0-", "node-0", simpleDaemonSetLabel, ds2),
+ },
+ },
+ }
+ for name, tc := range cases {
+ t.Run(name, func(t *testing.T) {
+ _, ctx := ktesting.NewTestContext(t)
+ manager, _, _, err := newTestController(ctx, ds, ds2)
+ if err != nil {
+ t.Fatalf("error creating DaemonSets controller: %v", err)
+ }
+ err = manager.dsStore.Add(ds)
if err != nil {
t.Fatal(err)
}
- }
+ err = manager.dsStore.Add(ds2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ addNodes(manager.nodeStore, 0, 2, nil)
- nodesToDaemonPods, err := manager.getNodesToDaemonPods(context.TODO(), ds)
- if err != nil {
- t.Fatalf("getNodesToDaemonPods() error: %v", err)
- }
- gotPods := map[string]bool{}
- for node, pods := range nodesToDaemonPods {
- for _, pod := range pods {
- if pod.Spec.NodeName != node {
- t.Errorf("pod %v grouped into %v but belongs in %v", pod.Name, node, pod.Spec.NodeName)
+ for _, pod := range tc.wantedPods {
+ manager.podStore.Add(pod)
+ }
+
+ for _, pod := range tc.ignoredPods {
+ err = manager.podStore.Add(pod)
+ if err != nil {
+ t.Fatal(err)
}
- gotPods[pod.Name] = true
}
- }
- for _, pod := range wantedPods {
- if !gotPods[pod.Name] {
- t.Errorf("expected pod %v but didn't get it", pod.Name)
+
+ nodesToDaemonPods, err := manager.getNodesToDaemonPods(context.TODO(), ds, tc.includeDeletedTerminal)
+ if err != nil {
+ t.Fatalf("getNodesToDaemonPods() error: %v", err)
}
- delete(gotPods, pod.Name)
- }
- for podName := range gotPods {
- t.Errorf("unexpected pod %v was returned", podName)
- }
+ gotPods := map[string]bool{}
+ for node, pods := range nodesToDaemonPods {
+ for _, pod := range pods {
+ if pod.Spec.NodeName != node {
+ t.Errorf("pod %v grouped into %v but belongs in %v", pod.Name, node, pod.Spec.NodeName)
+ }
+ gotPods[pod.Name] = true
+ }
+ }
+ for _, pod := range tc.wantedPods {
+ if !gotPods[pod.Name] {
+ t.Errorf("expected pod %v but didn't get it", pod.Name)
+ }
+ delete(gotPods, pod.Name)
+ }
+ for podName := range gotPods {
+ t.Errorf("unexpected pod %v was returned", podName)
+ }
+ })
}
}
diff --git a/pkg/controller/daemon/update.go b/pkg/controller/daemon/update.go
index 2665b170c04..d7755da95d6 100644
--- a/pkg/controller/daemon/update.go
+++ b/pkg/controller/daemon/update.go
@@ -43,7 +43,7 @@ import (
// remaining within the constraints imposed by the update strategy.
func (dsc *DaemonSetsController) rollingUpdate(ctx context.Context, ds *apps.DaemonSet, nodeList []*v1.Node, hash string) error {
logger := klog.FromContext(ctx)
- nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds)
+ nodeToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds, false)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
@@ -294,7 +294,8 @@ func (dsc *DaemonSetsController) constructHistory(ctx context.Context, ds *apps.
}
func (dsc *DaemonSetsController) cleanupHistory(ctx context.Context, ds *apps.DaemonSet, old []*apps.ControllerRevision) error {
- nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds)
+ // Include deleted terminal pods when maintaining history.
+ nodesToDaemonPods, err := dsc.getNodesToDaemonPods(ctx, ds, true)
if err != nil {
return fmt.Errorf("couldn't get node to daemon pod mapping for daemon set %q: %v", ds.Name, err)
}
diff --git a/test/integration/daemonset/daemonset_test.go b/test/integration/daemonset/daemonset_test.go
index 4c22d1cc525..c1d84951745 100644
--- a/test/integration/daemonset/daemonset_test.go
+++ b/test/integration/daemonset/daemonset_test.go
@@ -49,6 +49,7 @@ import (
"k8s.io/kubernetes/pkg/scheduler/profile"
labelsutil "k8s.io/kubernetes/pkg/util/labels"
"k8s.io/kubernetes/test/integration/framework"
+ testutils "k8s.io/kubernetes/test/integration/util"
"k8s.io/kubernetes/test/utils/ktesting"
)
@@ -155,6 +156,7 @@ func newDaemonSet(name, namespace string) *apps.DaemonSet {
}
func cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet) {
+ t.Helper()
ds, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{})
if err != nil {
t.Errorf("Failed to get DaemonSet %s/%s: %v", ds.Namespace, ds.Name, err)
@@ -176,6 +178,10 @@ func cleanupDaemonSets(t *testing.T, cs clientset.Interface, ds *apps.DaemonSet)
return
}
+ if len(ds.Spec.Template.Finalizers) > 0 {
+ testutils.RemovePodFinalizersInNamespace(context.TODO(), cs, t, ds.Namespace)
+ }
+
// Wait for the daemon set controller to kill all the daemon pods.
if err := wait.Poll(100*time.Millisecond, 30*time.Second, func() (bool, error) {
updatedDS, err := cs.AppsV1().DaemonSets(ds.Namespace).Get(context.TODO(), ds.Name, metav1.GetOptions{})
@@ -275,9 +281,7 @@ func validateDaemonSetPodsAndMarkReady(
) {
if err := wait.Poll(time.Second, 60*time.Second, func() (bool, error) {
objects := podInformer.GetIndexer().List()
- if len(objects) != numberPods {
- return false, nil
- }
+ nonTerminatedPods := 0
for _, object := range objects {
pod := object.(*v1.Pod)
@@ -294,6 +298,10 @@ func validateDaemonSetPodsAndMarkReady(
t.Errorf("controllerRef.Controller is not set to true")
}
+ if podutil.IsPodPhaseTerminal(pod.Status.Phase) {
+ continue
+ }
+ nonTerminatedPods++
if !podutil.IsPodReady(pod) && len(pod.Spec.NodeName) != 0 {
podCopy := pod.DeepCopy()
podCopy.Status = v1.PodStatus{
@@ -307,7 +315,7 @@ func validateDaemonSetPodsAndMarkReady(
}
}
- return true, nil
+ return nonTerminatedPods == numberPods, nil
}); err != nil {
t.Fatal(err)
}
@@ -536,8 +544,23 @@ func TestSimpleDaemonSetLaunchesPods(t *testing.T) {
}
func TestSimpleDaemonSetRestartsPodsOnTerminalPhase(t *testing.T) {
- for _, podPhase := range []v1.PodPhase{v1.PodSucceeded, v1.PodFailed} {
- t.Run(string(podPhase), func(t *testing.T) {
+ cases := map[string]struct {
+ phase v1.PodPhase
+ finalizer bool
+ }{
+ "Succeeded": {
+ phase: v1.PodSucceeded,
+ },
+ "Failed": {
+ phase: v1.PodFailed,
+ },
+ "Succeeded with finalizer": {
+ phase: v1.PodSucceeded,
+ finalizer: true,
+ },
+ }
+ for name, tc := range cases {
+ t.Run(name, func(t *testing.T) {
forEachStrategy(t, func(t *testing.T, strategy *apps.DaemonSetUpdateStrategy) {
ctx, closeFn, dc, informers, clientset := setup(t)
defer closeFn()
@@ -553,6 +576,9 @@ func TestSimpleDaemonSetRestartsPodsOnTerminalPhase(t *testing.T) {
go dc.Run(ctx, 2)
ds := newDaemonSet("restart-terminal-pod", ns.Name)
+ if tc.finalizer {
+ ds.Spec.Template.Finalizers = append(ds.Spec.Template.Finalizers, "test.k8s.io/finalizer")
+ }
ds.Spec.UpdateStrategy = *strategy
if _, err := dsClient.Create(ctx, ds, metav1.CreateOptions{}); err != nil {
t.Fatalf("Failed to create DaemonSet: %v", err)
@@ -566,9 +592,9 @@ func TestSimpleDaemonSetRestartsPodsOnTerminalPhase(t *testing.T) {
validateDaemonSetStatus(dsClient, ds.Name, int32(numNodes), t)
podToMarkAsTerminal := podInformer.GetIndexer().List()[0].(*v1.Pod)
podCopy := podToMarkAsTerminal.DeepCopy()
- podCopy.Status.Phase = podPhase
+ podCopy.Status.Phase = tc.phase
if _, err := podClient.UpdateStatus(ctx, podCopy, metav1.UpdateOptions{}); err != nil {
- t.Fatalf("Failed to mark the pod as terminal with phase: %v. Error: %v", podPhase, err)
+ t.Fatalf("Failed to mark the pod as terminal with phase: %v. Error: %v", tc.phase, err)
}
// verify all pods are active. They either continue Running or are Pending after restart
validateDaemonSetPodsActive(podClient, podInformer, numNodes, t)
diff --git a/test/integration/podgc/podgc_test.go b/test/integration/podgc/podgc_test.go
index 76ca9b9ed73..4957e9fc443 100644
--- a/test/integration/podgc/podgc_test.go
+++ b/test/integration/podgc/podgc_test.go
@@ -116,7 +116,7 @@ func TestPodGcOrphanedPodsWithFinalizer(t *testing.T) {
if err != nil {
t.Fatalf("Error %v, while creating pod: %v", err, klog.KObj(pod))
}
- defer testutils.RemovePodFinalizers(testCtx.ClientSet, t, []*v1.Pod{pod})
+ defer testutils.RemovePodFinalizers(testCtx.Ctx, testCtx.ClientSet, t, *pod)
pod.Status.Phase = test.phase
if _, err := testCtx.ClientSet.CoreV1().Pods(testCtx.NS.Name).UpdateStatus(testCtx.Ctx, pod, metav1.UpdateOptions{}); err != nil {
@@ -224,7 +224,7 @@ func TestTerminatingOnOutOfServiceNode(t *testing.T) {
t.Fatalf("Error %v, while creating pod: %v", err, klog.KObj(pod))
}
if test.withFinalizer {
- defer testutils.RemovePodFinalizers(testCtx.ClientSet, t, []*v1.Pod{pod})
+ defer testutils.RemovePodFinalizers(testCtx.Ctx, testCtx.ClientSet, t, *pod)
}
// trigger termination of the pod, but with long grace period so that it is not removed immediately
diff --git a/test/integration/util/util.go b/test/integration/util/util.go
index 359459e6846..69f24213fa2 100644
--- a/test/integration/util/util.go
+++ b/test/integration/util/util.go
@@ -18,6 +18,7 @@ package util
import (
"context"
+ "encoding/json"
"errors"
"fmt"
"testing"
@@ -27,6 +28,7 @@ import (
policy "k8s.io/api/policy/v1"
apierrors "k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/uuid"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/apiserver/pkg/admission"
@@ -229,17 +231,33 @@ func CleanupTest(t *testing.T, testCtx *TestContext) {
testCtx.CloseFn()
}
+func RemovePodFinalizersInNamespace(ctx context.Context, cs clientset.Interface, t *testing.T, ns string) {
+ t.Helper()
+ pods, err := cs.CoreV1().Pods(ns).List(ctx, metav1.ListOptions{})
+ if err != nil {
+ t.Fatalf("Failed obtaining list of pods: %v", err)
+ }
+ RemovePodFinalizers(ctx, cs, t, pods.Items...)
+}
+
// RemovePodFinalizers removes pod finalizers for the pods
-func RemovePodFinalizers(cs clientset.Interface, t *testing.T, pods []*v1.Pod) {
+func RemovePodFinalizers(ctx context.Context, cs clientset.Interface, t *testing.T, pods ...v1.Pod) {
+ t.Helper()
for _, p := range pods {
- pod, err := cs.CoreV1().Pods(p.Namespace).Get(context.TODO(), p.Name, metav1.GetOptions{})
+ pod, err := cs.CoreV1().Pods(p.Namespace).Get(ctx, p.Name, metav1.GetOptions{})
if err != nil && !apierrors.IsNotFound(err) {
- t.Errorf("error while removing pod finalizers for %v: %v", klog.KObj(p), err)
- } else if pod != nil {
- pod.ObjectMeta.Finalizers = nil
- _, err = cs.CoreV1().Pods(pod.Namespace).Update(context.TODO(), pod, metav1.UpdateOptions{})
+ t.Errorf("error while removing pod finalizers for %v: %v", klog.KObj(&p), err)
+ } else if pod != nil && len(pod.Finalizers) > 0 {
+ // Use Patch to remove finalizer, instead of Update, to avoid transient
+ // conflicts.
+ patchBytes, _ := json.Marshal(map[string]interface{}{
+ "metadata": map[string]interface{}{
+ "$deleteFromPrimitiveList/finalizers": pod.Finalizers,
+ },
+ })
+ _, err = cs.CoreV1().Pods(pod.Namespace).Patch(ctx, pod.Name, types.StrategicMergePatchType, patchBytes, metav1.PatchOptions{})
if err != nil {
- t.Errorf("error while updating pod status for %v: %v", klog.KObj(p), err)
+ t.Errorf("error while updating pod status for %v: %v", klog.KObj(&p), err)
}
}
}
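
Note on the change above: the behavioral core is a single predicate added to getNodesToDaemonPods. A minimal sketch of that rule, assuming the podutil package the controller already imports (the standalone function name is hypothetical, for illustration only):

    import (
        v1 "k8s.io/api/core/v1"
        podutil "k8s.io/kubernetes/pkg/api/v1/pod"
    )

    // skipDeletedTerminalPod mirrors the filter in getNodesToDaemonPods: when
    // the caller did not ask for deleted terminal Pods, any claimed Pod that
    // is terminal (Succeeded or Failed) and already marked for deletion is
    // dropped from the node-to-Pods mapping.
    func skipDeletedTerminalPod(pod *v1.Pod, includeDeletedTerminal bool) bool {
        return !includeDeletedTerminal && podutil.IsPodTerminal(pod) && pod.DeletionTimestamp != nil
    }

Per the diff, cleanupHistory is the only caller that passes includeDeletedTerminal=true, since revision bookkeeping must still account for Pods that are held back by a finalizer. In the test utilities, RemovePodFinalizers switches from Update to a strategic merge patch with $deleteFromPrimitiveList/finalizers for the reason given in its comment: the patch removes only the listed finalizers and avoids transient update conflicts.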
|
50
|
Merge pull request #118922 from champtar/kubeadm-backdate-ca
kubeadm: backdate generated CAs
|
2023-08-18
| null | null |
51
|
Merge pull request #118182 from seans3/wsstream-refactor
Refactor wsstream library from apiserver to apimachinery
|
2023-08-18
| null | null |
52
|
Exclude terminal pods from DaemonSet e2e tests
Change-Id: Ic29ca1739ebdc54822d1751fcd56a99c628021c4
|
2023-08-18
| null |
index d55e991277a..460b882fe18 100644
--- a/test/e2e/apps/daemon_set.go
+++ b/test/e2e/apps/daemon_set.go
@@ -39,6 +39,7 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/runtime/schema"
+ "k8s.io/apimachinery/pkg/selection"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/intstr"
"k8s.io/apimachinery/pkg/util/sets"
@@ -74,6 +75,16 @@ const (
// node selectors labels to namespaces
var NamespaceNodeSelectors = []string{"scheduler.alpha.kubernetes.io/node-selector"}
+var nonTerminalPhaseSelector = func() labels.Selector {
+ var reqs []labels.Requirement
+ for _, phase := range []v1.PodPhase{v1.PodFailed, v1.PodSucceeded} {
+ req, _ := labels.NewRequirement("status.phase", selection.NotEquals, []string{string(phase)})
+ reqs = append(reqs, *req)
+ }
+ selector := labels.NewSelector()
+ return selector.Add(reqs...)
+}()
+
type updateDSFunc func(*appsv1.DaemonSet)
// updateDaemonSetWithRetries updates daemonsets with the given applyUpdate func
@@ -1025,7 +1036,10 @@ func newDaemonSetWithLabel(dsName, image string, label map[string]string) *appsv
func listDaemonPods(ctx context.Context, c clientset.Interface, ns string, label map[string]string) *v1.PodList {
selector := labels.Set(label).AsSelector()
- options := metav1.ListOptions{LabelSelector: selector.String()}
+ options := metav1.ListOptions{
+ LabelSelector: selector.String(),
+ FieldSelector: nonTerminalPhaseSelector.String(),
+ }
podList, err := c.CoreV1().Pods(ns).List(ctx, options)
framework.ExpectNoError(err)
gomega.Expect(podList.Items).ToNot(gomega.BeEmpty())
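
A note on nonTerminalPhaseSelector above: it is built with the labels package but sent to the API server as a field selector string. A minimal, self-contained sketch of what that renders to (phase names written out as plain strings here for brevity):

    package main

    import (
        "fmt"

        "k8s.io/apimachinery/pkg/labels"
        "k8s.io/apimachinery/pkg/selection"
    )

    func main() {
        var reqs []labels.Requirement
        // v1.PodFailed and v1.PodSucceeded stringify to these two values.
        for _, phase := range []string{"Failed", "Succeeded"} {
            req, err := labels.NewRequirement("status.phase", selection.NotEquals, []string{phase})
            if err != nil {
                panic(err)
            }
            reqs = append(reqs, *req)
        }
        // Prints "status.phase!=Failed,status.phase!=Succeeded"; listDaemonPods
        // passes this as FieldSelector, so the server filters terminal Pods out
        // before the test counts anything.
        fmt.Println(labels.NewSelector().Add(reqs...).String())
    }

The fields package (fields.OneTermNotEqualSelector) could express each clause as well; the labels builder is simply a convenient way to join the two requirements into one string.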
|
53
|
Merge pull request #118280 from stlaz/e2e_psa_labels
Set all PSa labels in tests
|
2023-08-18
| null | null |
54
|
Merge pull request #117927 from kaisoz/add-FailedToRetrieveImagePullSecret-event
Log a warning if an ImagePullSecret does not exist
|
2023-08-18
| null |
index 663ac7fb3a8,ca1ee492feb..7d958234820
--- a/pkg/kubelet/kubelet_pods_test.go
+++ b/pkg/kubelet/kubelet_pods_test.go
@@@ -52,8 -50,11 +52,9 @@@ import
"k8s.io/kubernetes/pkg/features"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
containertest "k8s.io/kubernetes/pkg/kubelet/container/testing"
- "k8s.io/kubernetes/pkg/kubelet/cri/streaming/portforward"
- "k8s.io/kubernetes/pkg/kubelet/cri/streaming/remotecommand"
"k8s.io/kubernetes/pkg/kubelet/metrics"
"k8s.io/kubernetes/pkg/kubelet/prober/results"
+ "k8s.io/kubernetes/pkg/kubelet/secret"
"k8s.io/kubernetes/pkg/kubelet/status"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
netutils "k8s.io/utils/net"
|
55
|
Delete CRDs created during field validation tests.
|
2023-08-18
| null |
index 5ff9041ca90..155d42c9e9c 100644
--- a/test/e2e/apimachinery/field_validation.go
+++ b/test/e2e/apimachinery/field_validation.go
@@ -247,6 +247,11 @@ var _ = SIGDescribe("FieldValidation", func() {
framework.Failf("cannot create crd %s", err)
}
+ defer func() {
+ err = fixtures.DeleteV1CustomResourceDefinition(noxuDefinition, apiExtensionClient)
+ framework.ExpectNoError(err, "deleting CustomResourceDefinition")
+ }()
+
kind := noxuDefinition.Spec.Names.Kind
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Versions[0].Name
name := "mytest"
@@ -257,8 +262,6 @@ apiVersion: %s
kind: %s
metadata:
name: %s
- finalizers:
- - test-finalizer
spec:
foo: foo1
cronSpec: "* * * * */5"
@@ -304,6 +307,11 @@ spec:
framework.Failf("cannot create crd %s", err)
}
+ defer func() {
+ err = fixtures.DeleteV1CustomResourceDefinition(noxuDefinition, apiExtensionClient)
+ framework.ExpectNoError(err, "deleting CustomResourceDefinition")
+ }()
+
kind := noxuDefinition.Spec.Names.Kind
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Versions[0].Name
name := "mytest"
@@ -314,8 +322,6 @@ apiVersion: %s
kind: %s
metadata:
name: %s
- finalizers:
- - test-finalizer
spec:
unknown: uk1
cronSpec: "* * * * */5"
@@ -424,6 +430,11 @@ spec:
framework.Failf("cannot create crd %s", err)
}
+ defer func() {
+ err = fixtures.DeleteV1CustomResourceDefinition(noxuDefinition, apiExtensionClient)
+ framework.ExpectNoError(err, "deleting CustomResourceDefinition")
+ }()
+
kind := noxuDefinition.Spec.Names.Kind
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Versions[0].Name
name := "mytest"
@@ -434,8 +445,6 @@ apiVersion: %s
kind: %s
metadata:
name: %s
- finalizers:
- - test-finalizer
unknownField: unknown
spec:
foo: foo1
@@ -563,6 +572,11 @@ spec:
framework.Failf("cannot create crd %s", err)
}
+ defer func() {
+ err = fixtures.DeleteV1CustomResourceDefinition(noxuDefinition, apiExtensionClient)
+ framework.ExpectNoError(err, "deleting CustomResourceDefinition")
+ }()
+
kind := noxuDefinition.Spec.Names.Kind
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Versions[0].Name
name := "mytest"
@@ -574,8 +588,6 @@ kind: %s
metadata:
name: %s
unknownMeta: unknown
- finalizers:
- - test-finalizer
spec:
template:
apiversion: foo/v1
@@ -690,6 +702,11 @@ spec:
framework.Failf("cannot create crd %s", err)
}
+ defer func() {
+ err = fixtures.DeleteV1CustomResourceDefinition(noxuDefinition, apiExtensionClient)
+ framework.ExpectNoError(err, "deleting CustomResourceDefinition")
+ }()
+
kind := noxuDefinition.Spec.Names.Kind
apiVersion := noxuDefinition.Spec.Group + "/" + noxuDefinition.Spec.Versions[0].Name
name := "mytest"
@@ -700,8 +717,6 @@ apiVersion: %s
kind: %s
metadata:
name: %s
- finalizers:
- - test-finalizer
spec:
unknown: uk1
foo: foo1
@@ -718,7 +733,7 @@ spec:
Param("fieldValidation", "Strict").
Body(yamlBody).
DoRaw(ctx)
- if !(strings.Contains(string(result), `line 11: key \"foo\" already set in map`)) {
+ if !(strings.Contains(string(result), `line 9: key \"foo\" already set in map`)) {
framework.Failf("error missing duplicate field: %v:\n%v", err, string(result))
}
})
|
56
|
Merge pull request #118943 from SataQiu/clean-kubeadm-20230628
kubeadm: remove unused Kind and Resource functions from kubeadm internal API
|
2023-08-18
| null | null |
57
|
kubeadm: remove unused Kind and Resource functions from output API
|
2023-08-18
| null |
index d7b7fcdd8e9..4eb647d8e3a 100644
--- a/cmd/kubeadm/app/apis/output/register.go
+++ b/cmd/kubeadm/app/apis/output/register.go
@@ -34,16 +34,6 @@ var (
AddToScheme = SchemeBuilder.AddToScheme
)
-// Kind takes an unqualified kind and returns a Group qualified GroupKind
-func Kind(kind string) schema.GroupKind {
- return SchemeGroupVersion.WithKind(kind).GroupKind()
-}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
- return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&BootstrapToken{},
diff --git a/cmd/kubeadm/app/apis/output/v1alpha2/register.go b/cmd/kubeadm/app/apis/output/v1alpha2/register.go
index 7db53fa653d..7e01a1393ec 100644
--- a/cmd/kubeadm/app/apis/output/v1alpha2/register.go
+++ b/cmd/kubeadm/app/apis/output/v1alpha2/register.go
@@ -43,16 +43,6 @@ func init() {
localSchemeBuilder.Register(addKnownTypes)
}
-// Kind takes an unqualified kind and returns a Group qualified GroupKind
-func Kind(kind string) schema.GroupKind {
- return SchemeGroupVersion.WithKind(kind).GroupKind()
-}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
- return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&BootstrapToken{},
|
58
|
apiserver: introduce larger buckets for flowcontrol request_wait_duration_seconds bucket
Signed-off-by: Andrew Sy Kim <[email protected]>
|
2023-08-18
| null |
index 7cb05df6c89..a6b22a1a62b 100644
--- a/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go
+++ b/staging/src/k8s.io/apiserver/pkg/util/flowcontrol/metrics/metrics.go
@@ -47,7 +47,7 @@ const (
var (
queueLengthBuckets = []float64{0, 10, 25, 50, 100, 250, 500, 1000}
- requestDurationSecondsBuckets = []float64{0, 0.005, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 30}
+ requestDurationSecondsBuckets = []float64{0, 0.005, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 30}
)
var registerMetrics sync.Once
diff --git a/test/instrumentation/documentation/documentation-list.yaml b/test/instrumentation/documentation/documentation-list.yaml
index 7791a8ff5e0..719613011d3 100644
--- a/test/instrumentation/documentation/documentation-list.yaml
+++ b/test/instrumentation/documentation/documentation-list.yaml
@@ -3612,6 +3612,7 @@
- 2
- 5
- 10
+ - 15
- 30
- name: seat_fair_frac
subsystem: flowcontrol
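
For context, buckets for these histograms are fixed at registration time, so widening the range requires a code change like the one above. Below is a hedged sketch of declaring a histogram with the new bucket list through k8s.io/component-base/metrics; the metric name and labels are illustrative, not the real flowcontrol definitions:

package demo

import (
	compbasemetrics "k8s.io/component-base/metrics"
)

// demoWaitDuration shows the widened buckets; observations beyond 30s
// still fall into the implicit +Inf bucket.
var demoWaitDuration = compbasemetrics.NewHistogramVec(
	&compbasemetrics.HistogramOpts{
		Namespace:      "apiserver",
		Subsystem:      "flowcontrol",
		Name:           "demo_request_wait_duration_seconds",
		Help:           "Time a request spent waiting in its queue.",
		Buckets:        []float64{0, 0.005, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10, 15, 30},
		StabilityLevel: compbasemetrics.ALPHA,
	},
	[]string{"priority_level", "flow_schema"},
)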
|
59
|
kubeadm: remove unused Kind and Resource functions from kubeadm internal API
|
2023-08-18
| null |
index c85e1c34908..99dca3cdbcd 100644
--- a/cmd/kubeadm/app/apis/kubeadm/register.go
+++ b/cmd/kubeadm/app/apis/kubeadm/register.go
@@ -34,16 +34,6 @@ var (
AddToScheme = SchemeBuilder.AddToScheme
)
-// Kind takes an unqualified kind and returns a Group qualified GroupKind
-func Kind(kind string) schema.GroupKind {
- return SchemeGroupVersion.WithKind(kind).GroupKind()
-}
-
-// Resource takes an unqualified resource and returns a Group qualified GroupResource
-func Resource(resource string) schema.GroupResource {
- return SchemeGroupVersion.WithResource(resource).GroupResource()
-}
-
func addKnownTypes(scheme *runtime.Scheme) error {
scheme.AddKnownTypes(SchemeGroupVersion,
&InitConfiguration{},
|
61
|
Add unit tests for parallel StatefulSet create & delete
|
2023-08-18
| null |
index bebaa4469d5..c7fe131996c 100644
--- a/pkg/controller/statefulset/stateful_set_control_test.go
+++ b/pkg/controller/statefulset/stateful_set_control_test.go
@@ -2362,6 +2362,12 @@ type requestTracker struct {
requests int
err error
after int
+
+ parallelLock sync.Mutex
+ parallel int
+ maxParallel int
+
+ delay time.Duration
}
func (rt *requestTracker) errorReady() bool {
@@ -2371,16 +2377,31 @@ func (rt *requestTracker) errorReady() bool {
}
func (rt *requestTracker) inc() {
+ rt.parallelLock.Lock()
+ rt.parallel++
+ if rt.maxParallel < rt.parallel {
+ rt.maxParallel = rt.parallel
+ }
+ rt.parallelLock.Unlock()
+
rt.Lock()
defer rt.Unlock()
rt.requests++
+ if rt.delay != 0 {
+ time.Sleep(rt.delay)
+ }
}
func (rt *requestTracker) reset() {
+ rt.parallelLock.Lock()
+ rt.parallel = 0
+ rt.parallelLock.Unlock()
+
rt.Lock()
defer rt.Unlock()
rt.err = nil
rt.after = 0
+ rt.delay = 0
}
func newRequestTracker(requests int, err error, after int) requestTracker {
@@ -2851,6 +2872,182 @@ func fakeResourceVersion(object interface{}) {
}
}
+func TestParallelScale(t *testing.T) {
+ for _, tc := range []struct {
+ desc string
+ replicas int32
+ desiredReplicas int32
+ }{
+ {
+ desc: "scale up from 3 to 30",
+ replicas: 3,
+ desiredReplicas: 30,
+ },
+ {
+ desc: "scale down from 10 to 1",
+ replicas: 10,
+ desiredReplicas: 1,
+ },
+
+ {
+ desc: "scale down to 0",
+ replicas: 501,
+ desiredReplicas: 0,
+ },
+ {
+ desc: "scale up from 0",
+ replicas: 0,
+ desiredReplicas: 1000,
+ },
+ } {
+ t.Run(tc.desc, func(t *testing.T) {
+ set := burst(newStatefulSet(0))
+ parallelScale(t, set, tc.replicas, tc.desiredReplicas, assertBurstInvariants)
+ })
+ }
+
+}
+
+func parallelScale(t *testing.T, set *apps.StatefulSet, replicas, desiredReplicas int32, invariants invariantFunc) {
+ var err error
+ diff := desiredReplicas - replicas
+ client := fake.NewSimpleClientset(set)
+ om, _, ssc := setupController(client)
+ om.createPodTracker.delay = time.Millisecond
+
+ *set.Spec.Replicas = replicas
+ if err := parallelScaleUpStatefulSetControl(set, ssc, om, invariants); err != nil {
+ t.Errorf("Failed to turn up StatefulSet : %s", err)
+ }
+ set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
+ if err != nil {
+ t.Fatalf("Error getting updated StatefulSet: %v", err)
+ }
+ if set.Status.Replicas != replicas {
+ t.Errorf("want %v, got %v replicas", replicas, set.Status.Replicas)
+ }
+
+ fn := parallelScaleUpStatefulSetControl
+ if diff < 0 {
+ fn = parallelScaleDownStatefulSetControl
+ }
+ *set.Spec.Replicas = desiredReplicas
+ if err := fn(set, ssc, om, invariants); err != nil {
+ t.Errorf("Failed to scale StatefulSet : %s", err)
+ }
+
+ set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
+ if err != nil {
+ t.Fatalf("Error getting updated StatefulSet: %v", err)
+ }
+
+ if set.Status.Replicas != desiredReplicas {
+ t.Errorf("Failed to scale statefulset to %v replicas, got %v replicas", desiredReplicas, set.Status.Replicas)
+ }
+
+ if (diff < -1 || diff > 1) && om.createPodTracker.maxParallel <= 1 {
+ t.Errorf("want max parallel requests > 1, got %v", om.createPodTracker.maxParallel)
+ }
+}
+
+func parallelScaleUpStatefulSetControl(set *apps.StatefulSet,
+ ssc StatefulSetControlInterface,
+ om *fakeObjectManager,
+ invariants invariantFunc) error {
+ selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
+ if err != nil {
+ return err
+ }
+
+ // Give up after 2 loops.
+ // 2 * 500 pods per loop = 1000 max pods <- this should be enough for all test cases.
+ // Anything slower than that (requiring more iterations) indicates a problem and should fail the test.
+ maxLoops := 2
+ loops := maxLoops
+ for set.Status.Replicas < *set.Spec.Replicas {
+ if loops < 1 {
+ return fmt.Errorf("after %v loops: want %v, got replicas %v", maxLoops, *set.Spec.Replicas, set.Status.Replicas)
+ }
+ loops--
+ pods, err := om.podsLister.Pods(set.Namespace).List(selector)
+ if err != nil {
+ return err
+ }
+ sort.Sort(ascendingOrdinal(pods))
+
+ ordinals := []int{}
+ for _, pod := range pods {
+ if pod.Status.Phase == "" {
+ ordinals = append(ordinals, getOrdinal(pod))
+ }
+ }
+ // ensure all pods are valid (have a phase)
+ for _, ord := range ordinals {
+ if pods, err = om.setPodPending(set, ord); err != nil {
+ return err
+ }
+ }
+
+ // run the controller once and check invariants
+ _, err = ssc.UpdateStatefulSet(context.TODO(), set, pods)
+ if err != nil {
+ return err
+ }
+ set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
+ if err != nil {
+ return err
+ }
+ if err := invariants(set, om); err != nil {
+ return err
+ }
+ }
+ return invariants(set, om)
+}
+
+func parallelScaleDownStatefulSetControl(set *apps.StatefulSet, ssc StatefulSetControlInterface, om *fakeObjectManager, invariants invariantFunc) error {
+ selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
+ if err != nil {
+ return err
+ }
+
+ // Give up after 2 loops.
+ // 2 * 500 pods per loop = 1000 max pods <- this should be enough for all test cases.
+ // Anything slower than that (requiring more iterations) indicates a problem and should fail the test.
+ maxLoops := 2
+ loops := maxLoops
+ for set.Status.Replicas > *set.Spec.Replicas {
+ if loops < 1 {
+ return fmt.Errorf("after %v loops: want %v replicas, got %v", maxLoops, *set.Spec.Replicas, set.Status.Replicas)
+ }
+ loops--
+ pods, err := om.podsLister.Pods(set.Namespace).List(selector)
+ if err != nil {
+ return err
+ }
+ sort.Sort(ascendingOrdinal(pods))
+ if _, err := ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
+ return err
+ }
+ set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
+ if err != nil {
+ return err
+ }
+ if _, err = ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
+ return err
+ }
+ }
+
+ set, err = om.setsLister.StatefulSets(set.Namespace).Get(set.Name)
+ if err != nil {
+ return err
+ }
+ if err := invariants(set, om); err != nil {
+ return err
+ }
+
+ return nil
+}
+
func scaleUpStatefulSetControl(set *apps.StatefulSet,
ssc StatefulSetControlInterface,
om *fakeObjectManager,
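
The extended requestTracker records the peak number of concurrent inc() calls so the test can assert that pods were really created in parallel. A self-contained sketch of the same technique; unlike the tracker above, this variant decrements on exit instead of relying on reset() between runs:

package main

import (
	"fmt"
	"sync"
	"time"
)

type parallelTracker struct {
	mu          sync.Mutex
	parallel    int
	maxParallel int
}

func (t *parallelTracker) enter() {
	t.mu.Lock()
	t.parallel++
	if t.parallel > t.maxParallel {
		t.maxParallel = t.parallel
	}
	t.mu.Unlock()
}

func (t *parallelTracker) exit() {
	t.mu.Lock()
	t.parallel--
	t.mu.Unlock()
}

func main() {
	var tr parallelTracker
	var wg sync.WaitGroup
	for i := 0; i < 8; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			tr.enter()
			defer tr.exit()
			time.Sleep(10 * time.Millisecond) // simulated request
		}()
	}
	wg.Wait()
	fmt.Println("peak concurrency:", tr.maxParallel) // up to 8
}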
|
62
|
apiserver: introduce larger buckets for request_filter_duration_seconds metric
Signed-off-by: Andrew Sy Kim <[email protected]>
|
2023-08-18
| null |
index 450a6653da6..ba2aed69d44 100644
--- a/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go
+++ b/staging/src/k8s.io/apiserver/pkg/endpoints/metrics/metrics.go
@@ -229,7 +229,7 @@ var (
Subsystem: APIServerComponent,
Name: "request_filter_duration_seconds",
Help: "Request filter latency distribution in seconds, for each filter type",
- Buckets: []float64{0.0001, 0.0003, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1.0, 5.0},
+ Buckets: []float64{0.0001, 0.0003, 0.001, 0.003, 0.01, 0.03, 0.1, 0.3, 1.0, 5.0, 10.0, 15.0, 30.0},
StabilityLevel: compbasemetrics.ALPHA,
},
[]string{"filter"},
diff --git a/test/instrumentation/documentation/documentation-list.yaml b/test/instrumentation/documentation/documentation-list.yaml
index f29e16ef063..7791a8ff5e0 100644
--- a/test/instrumentation/documentation/documentation-list.yaml
+++ b/test/instrumentation/documentation/documentation-list.yaml
@@ -2727,6 +2727,9 @@
- 0.3
- 1
- 5
+ - 10
+ - 15
+ - 30
- name: request_post_timeout_total
subsystem: apiserver
help: Tracks the activity of the request handlers after the associated requests
|
63
|
Merge pull request #118202 from pohly/scheduler-perf-unit-test
scheduler-perf: run as integration tests
|
2023-08-18
| null | null |
64
|
Return Skip in PodTopologySpread#PreScore under specific conditions
Signed-off-by: utam0k <[email protected]>
|
2023-08-18
| null |
index bf5e8fe7d05..97770878f53 100644
--- a/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go
+++ b/pkg/scheduler/framework/plugins/podtopologyspread/scoring.go
@@ -120,9 +120,9 @@ func (pl *PodTopologySpread) PreScore(
return framework.AsStatus(fmt.Errorf("getting all nodes: %w", err))
}
- if len(filteredNodes) == 0 || len(allNodes) == 0 {
- // No nodes to score.
- return nil
+ if len(allNodes) == 0 {
+ // No need to score.
+ return framework.NewStatus(framework.Skip)
}
state := &preScoreState{
@@ -138,10 +138,9 @@ func (pl *PodTopologySpread) PreScore(
return framework.AsStatus(fmt.Errorf("calculating preScoreState: %w", err))
}
- // return if incoming pod doesn't have soft topology spread Constraints.
+ // return Skip if incoming pod doesn't have soft topology spread Constraints.
if len(state.Constraints) == 0 {
- cycleState.Write(preScoreStateKey, state)
- return nil
+ return framework.NewStatus(framework.Skip)
}
// Ignore parsing errors for backwards compatibility.
diff --git a/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go b/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go
index 9870f082ef8..b7a4377e147 100644
--- a/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go
+++ b/pkg/scheduler/framework/plugins/podtopologyspread/scoring_test.go
@@ -42,6 +42,74 @@ import (
var podTopologySpreadFunc = frameworkruntime.FactoryAdapter(feature.Features{}, New)
+// TestPreScoreSkip tests the cases in which PodTopologySpread#PreScore returns the Skip status.
+func TestPreScoreSkip(t *testing.T) {
+ tests := []struct {
+ name string
+ pod *v1.Pod
+ nodes []*v1.Node
+ objs []runtime.Object
+ config config.PodTopologySpreadArgs
+ }{
+ {
+ name: "the pod doesn't have soft topology spread Constraints",
+ pod: st.MakePod().Name("p").Namespace("default").Obj(),
+ config: config.PodTopologySpreadArgs{
+ DefaultingType: config.ListDefaulting,
+ },
+ nodes: []*v1.Node{
+ st.MakeNode().Name("node-a").Label(v1.LabelHostname, "node-a").Obj(),
+ st.MakeNode().Name("node-b").Label(v1.LabelHostname, "node-b").Obj(),
+ },
+ },
+ {
+ name: "default constraints and a replicaset that doesn't match",
+ pod: st.MakePod().Name("p").Namespace("default").Label("foo", "bar").Label("baz", "sup").OwnerReference("rs2", appsv1.SchemeGroupVersion.WithKind("ReplicaSet")).Obj(),
+ config: config.PodTopologySpreadArgs{
+ DefaultConstraints: []v1.TopologySpreadConstraint{
+ {
+ MaxSkew: 2,
+ TopologyKey: "planet",
+ WhenUnsatisfiable: v1.ScheduleAnyway,
+ },
+ },
+ DefaultingType: config.ListDefaulting,
+ },
+ nodes: []*v1.Node{
+ st.MakeNode().Name("node-a").Label("planet", "mars").Obj(),
+ },
+ objs: []runtime.Object{
+ &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "rs1"}, Spec: appsv1.ReplicaSetSpec{Selector: st.MakeLabelSelector().Exists("tar").Obj()}},
+ },
+ },
+ }
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ _, ctx := ktesting.NewTestContext(t)
+ ctx, cancel := context.WithCancel(ctx)
+ defer cancel()
+ informerFactory := informers.NewSharedInformerFactory(fake.NewSimpleClientset(tt.objs...), 0)
+ f, err := frameworkruntime.NewFramework(ctx, nil, nil,
+ frameworkruntime.WithSnapshotSharedLister(cache.NewSnapshot(nil, tt.nodes)),
+ frameworkruntime.WithInformerFactory(informerFactory))
+ if err != nil {
+ t.Fatalf("Failed creating framework runtime: %v", err)
+ }
+ pl, err := New(&tt.config, f, feature.Features{})
+ if err != nil {
+ t.Fatalf("Failed creating plugin: %v", err)
+ }
+ informerFactory.Start(ctx.Done())
+ informerFactory.WaitForCacheSync(ctx.Done())
+ p := pl.(*PodTopologySpread)
+ cs := framework.NewCycleState()
+ if s := p.PreScore(context.Background(), cs, tt.pod, tt.nodes); !s.IsSkip() {
+ t.Fatalf("Expected skip but got %v", s.AsError())
+ }
+ })
+ }
+}
+
func TestPreScoreStateEmptyNodes(t *testing.T) {
tests := []struct {
name string
@@ -258,29 +326,6 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
TopologyNormalizingWeight: []float64{topologyNormalizingWeight(1), topologyNormalizingWeight(1)},
},
},
- {
- name: "default constraints and a replicaset that doesn't match",
- pod: st.MakePod().Name("p").Namespace("default").Label("foo", "bar").Label("baz", "sup").OwnerReference("rs2", appsv1.SchemeGroupVersion.WithKind("ReplicaSet")).Obj(),
- config: config.PodTopologySpreadArgs{
- DefaultConstraints: []v1.TopologySpreadConstraint{
- {
- MaxSkew: 2,
- TopologyKey: "planet",
- WhenUnsatisfiable: v1.ScheduleAnyway,
- },
- },
- DefaultingType: config.ListDefaulting,
- },
- nodes: []*v1.Node{
- st.MakeNode().Name("node-a").Label("planet", "mars").Obj(),
- },
- objs: []runtime.Object{
- &appsv1.ReplicaSet{ObjectMeta: metav1.ObjectMeta{Namespace: "default", Name: "rs1"}, Spec: appsv1.ReplicaSetSpec{Selector: st.MakeLabelSelector().Exists("tar").Obj()}},
- },
- want: &preScoreState{
- TopologyPairToPodCounts: make(map[topologyPair]*int64),
- },
- },
{
name: "default constraints and a replicaset, but pod has constraints",
pod: st.MakePod().Name("p").Namespace("default").Label("foo", "bar").Label("baz", "sup").
@@ -545,7 +590,7 @@ func TestPreScoreStateEmptyNodes(t *testing.T) {
informerFactory.WaitForCacheSync(ctx.Done())
p := pl.(*PodTopologySpread)
cs := framework.NewCycleState()
- if s := p.PreScore(context.Background(), cs, tt.pod, tt.nodes); !s.IsSuccess() {
+ if s := p.PreScore(ctx, cs, tt.pod, tt.nodes); !s.IsSuccess() {
t.Fatal(s.AsError())
}
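
A minimal sketch of the pattern this commit adopts, using a hypothetical demoPlugin type: returning a Skip status from PreScore tells the framework to bypass the plugin's Score and NormalizeScore for that scheduling cycle:

package demo

import (
	"context"

	v1 "k8s.io/api/core/v1"
	"k8s.io/kubernetes/pkg/scheduler/framework"
)

type demoPlugin struct{}

// PreScore returns Skip when there is nothing for this plugin to score.
func (pl *demoPlugin) PreScore(ctx context.Context, cs *framework.CycleState, pod *v1.Pod, nodes []*v1.Node) *framework.Status {
	if len(nodes) == 0 {
		return framework.NewStatus(framework.Skip)
	}
	// ... otherwise compute and write pre-score state, then return nil ...
	return nil
}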
|
66
|
Merge pull request #117055 from cyclinder/csi_migration
remove CSI-migration gate
|
2023-08-18
| null | null |
68
|
Merge pull request #116729 from AxeZhan/handlers_sync
[Scheduler] Make sure handlers have synced before scheduling
|
2023-08-18
| null | null |
69
|
scheduler_perf: replace gomega.Eventually with wait.PollUntilContextTimeout
This is done for the sake of consistency. The failure message becomes less
useful.
|
2023-08-18
| null |
index 619d46bc39a..ecbfce6887b 100644
--- a/test/integration/scheduler_perf/scheduler_perf_test.go
+++ b/test/integration/scheduler_perf/scheduler_perf_test.go
@@ -31,7 +31,6 @@ import (
"time"
"github.com/google/go-cmp/cmp"
- "github.com/onsi/gomega"
v1 "k8s.io/api/core/v1"
resourcev1alpha2 "k8s.io/api/resource/v1alpha2"
@@ -51,7 +50,6 @@ import (
"k8s.io/component-base/featuregate"
featuregatetesting "k8s.io/component-base/featuregate/testing"
"k8s.io/component-base/metrics/legacyregistry"
- "k8s.io/kubernetes/pkg/features"
"k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/apis/config/scheme"
"k8s.io/kubernetes/pkg/scheduler/apis/config/validation"
@@ -1221,41 +1219,22 @@ func cleanupWorkload(ctx context.Context, tb testing.TB, tc *testCase, client cl
// actually removing a namespace can take some time (garbage collecting
// other generated object like secrets, etc.) and we don't want to
// start the next workloads while that cleanup is still going on.
- gomega.NewGomegaWithT(tb).Eventually(ctx, func(ctx context.Context) ([]interface{}, error) {
- var objects []interface{}
+ if err := wait.PollUntilContextTimeout(ctx, time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
namespaces, err := client.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
if err != nil {
- return nil, err
+ return false, err
}
- // Collecting several objects of interest (pods, claims) is done to
- // provide a more informative failure message when a namespace doesn't
- // disappear quickly enough.
for _, namespace := range namespaces.Items {
- if _, ok := numPodsScheduledPerNamespace[namespace.Name]; !ok {
- // Not a namespace created by the workload.
- continue
- }
- pods, err := client.CoreV1().Pods(namespace.Name).List(ctx, metav1.ListOptions{})
- if err != nil {
- return nil, err
- }
- if len(pods.Items) > 0 {
- // Record one pod per namespace - that's usually enough information.
- objects = append(objects, pods.Items[0])
+ if _, ok := numPodsScheduledPerNamespace[namespace.Name]; ok {
+ // A namespace created by the workload, need to wait.
+ return false, nil
}
- if tc.FeatureGates[features.DynamicResourceAllocation] {
- claims, err := client.ResourceV1alpha2().ResourceClaims(namespace.Name).List(ctx, metav1.ListOptions{})
- if err != nil {
- return nil, err
- }
- if len(claims.Items) > 0 {
- objects = append(objects, claims.Items[0])
- }
- }
- objects = append(objects, namespace)
}
- return objects, nil
- }).WithTimeout(5*time.Minute).Should(gomega.BeEmpty(), "deleting namespaces")
+ // All namespaces gone.
+ return true, nil
+ }); err != nil {
+ tb.Fatalf("failed while waiting for namespace removal: %v", err)
+ }
}
func createNamespaceIfNotPresent(ctx context.Context, tb testing.TB, client clientset.Interface, namespace string, podsPerNamespace *map[string]int) {
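
A runnable sketch of the wait.PollUntilContextTimeout shape used above: interval, timeout, immediate=false, then a ConditionWithContextFunc. The time-based condition stands in for the namespace-list check:

package main

import (
	"context"
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/wait"
)

func main() {
	deadline := time.Now().Add(3 * time.Second)
	err := wait.PollUntilContextTimeout(context.Background(), time.Second, 5*time.Minute, false,
		func(ctx context.Context) (bool, error) {
			// Return (true, nil) when done, (false, nil) to keep polling,
			// or a non-nil error to abort immediately.
			return time.Now().After(deadline), nil
		})
	fmt.Println("poll result:", err) // nil once the condition holds
}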
|
72
|
Merge pull request #118917 from kmala/daemonsetfix
increase the log level for the GetTargetNodeName error message
|
2023-08-18
| null |
index ffcebcb17db,7f477453d06..8802eeb81dc
--- a/pkg/controller/daemon/daemon_controller.go
+++ b/pkg/controller/daemon/daemon_controller.go
@@@ -761,15 -761,9 +761,15 @@@ func (dsc *DaemonSetsController) getNod
nodeToDaemonPods := make(map[string][]*v1.Pod)
logger := klog.FromContext(ctx)
for _, pod := range claimedPods {
+ if !includeDeletedTerminal && podutil.IsPodTerminal(pod) && pod.DeletionTimestamp != nil {
+ // This Pod has a finalizer or is already scheduled for deletion from the
+ // store by the kubelet or the Pod GC. The DS controller doesn't have
+ // anything else to do with it.
+ continue
+ }
nodeName, err := util.GetTargetNodeName(pod)
if err != nil {
- logger.Info("Failed to get target node name of Pod in DaemonSet",
+ logger.V(4).Info("Failed to get target node name of Pod in DaemonSet",
"pod", klog.KObj(pod), "daemonset", klog.KObj(ds))
continue
}
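
Restated as a standalone predicate, the added filter skips pods that are both terminal and already marked for deletion. A sketch using the same podutil helper the controller imports:

package demo

import (
	v1 "k8s.io/api/core/v1"
	podutil "k8s.io/kubernetes/pkg/api/v1/pod"
)

// shouldSkipPod mirrors the condition added above: a terminal pod with a
// deletion timestamp needs no further handling by the DaemonSet controller.
func shouldSkipPod(pod *v1.Pod, includeDeletedTerminal bool) bool {
	return !includeDeletedTerminal && podutil.IsPodTerminal(pod) && pod.DeletionTimestamp != nil
}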
|
73
|
scheduler_perf: fix goroutine leak in runWorkload
This becomes relevant when doing more fine-grained leak checking.
|
2023-08-18
| null |
index a39e76eb6b4..ba051aa8c9c 100644
--- a/test/integration/scheduler_perf/scheduler_perf_test.go
+++ b/test/integration/scheduler_perf/scheduler_perf_test.go
@@ -802,6 +802,12 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
// need to start again.
podInformer := informerFactory.Core().V1().Pods()
+ // Everything else started by this function gets stopped before it returns.
+ ctx, cancel := context.WithCancel(ctx)
+ var wg sync.WaitGroup
+ defer wg.Wait()
+ defer cancel()
+
var mu sync.Mutex
var dataItems []DataItem
nextNodeIndex := 0
@@ -876,9 +882,14 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
concreteOp.PodTemplatePath = tc.DefaultPodTemplatePath
}
var collectors []testDataCollector
+ // This needs a separate context and wait group because
+ // the code below needs to be sure that the goroutines
+ // are stopped.
var collectorCtx context.Context
var collectorCancel func()
var collectorWG sync.WaitGroup
+ defer collectorWG.Wait()
+
if concreteOp.CollectMetrics {
collectorCtx, collectorCancel = context.WithCancel(ctx)
defer collectorCancel()
@@ -987,7 +998,9 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
switch concreteOp.Mode {
case Create:
+ wg.Add(1)
go func() {
+ defer wg.Done()
count, threshold := 0, concreteOp.Number
if threshold == 0 {
threshold = math.MaxInt32
@@ -1005,7 +1018,9 @@ func runWorkload(ctx context.Context, b *testing.B, tc *testCase, w *workload, c
}
}()
case Recreate:
+ wg.Add(1)
go func() {
+ defer wg.Done()
retVals := make([][]string, len(churnFns))
// For each churn function, instantiate a slice of strings with length "concreteOp.Number".
for i := range retVals {
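
The fix hinges on defer ordering: defers run last-in-first-out, so registering wg.Wait() before cancel() means cancellation fires first and the wait only returns once every tracked goroutine has exited. A self-contained sketch of that shutdown sequence:

package main

import (
	"context"
	"fmt"
	"sync"
)

func run(parent context.Context) {
	ctx, cancel := context.WithCancel(parent)
	var wg sync.WaitGroup
	defer wg.Wait() // runs second: block until workers are gone
	defer cancel()  // runs first: tell workers to stop

	wg.Add(1)
	go func() {
		defer wg.Done()
		<-ctx.Done() // stand-in for a worker loop that honors cancellation
		fmt.Println("worker stopped")
	}()
}

func main() {
	run(context.Background())
	fmt.Println("run returned without leaking the goroutine")
}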
|
74
|
test/integration: avoid errors in fake PC controller during shutdown
Once the context is canceled, the controller can stop processing
events. Without this change it prints errors when the apiserver is already
down.
|
2023-08-18
| null |
index 359459e6846..4a0451d326d 100644
--- a/test/integration/util/util.go
+++ b/test/integration/util/util.go
@@ -127,7 +127,13 @@ func StartFakePVController(ctx context.Context, clientSet clientset.Interface, i
claimRef := obj.Spec.ClaimRef
pvc, err := clientSet.CoreV1().PersistentVolumeClaims(claimRef.Namespace).Get(ctx, claimRef.Name, metav1.GetOptions{})
if err != nil {
- klog.Errorf("error while getting %v/%v: %v", claimRef.Namespace, claimRef.Name, err)
+ // Note that the error can be anything, because components like
+ // apiserver are also shutting down at the same time, but this
+ // check is conservative and only ignores the "context canceled"
+ // error while shutting down.
+ if ctx.Err() == nil || !errors.Is(err, context.Canceled) {
+ klog.Errorf("error while getting %v/%v: %v", claimRef.Namespace, claimRef.Name, err)
+ }
return
}
@@ -136,7 +142,10 @@ func StartFakePVController(ctx context.Context, clientSet clientset.Interface, i
metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, pvutil.AnnBindCompleted, "yes")
_, err := clientSet.CoreV1().PersistentVolumeClaims(claimRef.Namespace).Update(ctx, pvc, metav1.UpdateOptions{})
if err != nil {
- klog.Errorf("error while updating %v/%v: %v", claimRef.Namespace, claimRef.Name, err)
+ if ctx.Err() == nil || !errors.Is(err, context.Canceled) {
+ // Shutting down, no need to record this.
+ klog.Errorf("error while updating %v/%v: %v", claimRef.Namespace, claimRef.Name, err)
+ }
return
}
}
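
A runnable sketch of the error-filtering rule introduced above: suppress an error only when our own context is already done and the error is (or wraps) context.Canceled; anything else still gets logged:

package main

import (
	"context"
	"errors"
	"fmt"
)

func logUnlessShuttingDown(ctx context.Context, err error) {
	if ctx.Err() == nil || !errors.Is(err, context.Canceled) {
		fmt.Println("error:", err)
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	logUnlessShuttingDown(ctx, errors.New("real failure")) // logged
	cancel()
	logUnlessShuttingDown(ctx, fmt.Errorf("get pvc: %w", context.Canceled)) // suppressed
}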
|
75
|
kubeadm: backdate generated CAs by 5 minutes
This allows for a small backward time jump after
certificate generation.
Signed-off-by: Etienne Champetier <[email protected]>
|
2023-08-18
| null |
index 62f08a61c3a..8d5cea7a985 100644
--- a/cmd/kubeadm/app/constants/constants.go
+++ b/cmd/kubeadm/app/constants/constants.go
@@ -44,6 +44,8 @@ const (
// should be joined with KubernetesDir.
TempDirForKubeadm = "tmp"
+ // CertificateBackdate defines the offset applied to notBefore for CA certificates generated by kubeadm
+ CertificateBackdate = time.Minute * 5
// CertificateValidity defines the validity for all the signed certificates generated by kubeadm
CertificateValidity = time.Hour * 24 * 365
diff --git a/cmd/kubeadm/app/util/pkiutil/pki_helpers.go b/cmd/kubeadm/app/util/pkiutil/pki_helpers.go
index 7887a2fbac3..dc78d1b3f6b 100644
--- a/cmd/kubeadm/app/util/pkiutil/pki_helpers.go
+++ b/cmd/kubeadm/app/util/pkiutil/pki_helpers.go
@@ -74,6 +74,8 @@ func NewCertificateAuthority(config *CertConfig) (*x509.Certificate, crypto.Sign
return nil, nil, errors.Wrap(err, "unable to create private key while generating CA certificate")
}
+ // backdate CA certificate to allow small time jumps
+ config.Config.NotBefore = time.Now().Add(-kubeadmconstants.CertificateBackdate)
cert, err := certutil.NewSelfSignedCACert(config.Config, key)
if err != nil {
return nil, nil, errors.Wrap(err, "unable to create self-signed CA certificate")
|
76
|
client-go: allow to set NotBefore in NewSelfSignedCACert()
Signed-off-by: Etienne Champetier <[email protected]>
|
2023-08-18
| null |
index 37b023ef25d..91e171271af 100644
--- a/staging/src/k8s.io/client-go/util/cert/cert.go
+++ b/staging/src/k8s.io/client-go/util/cert/cert.go
@@ -45,6 +45,7 @@ type Config struct {
Organization []string
AltNames AltNames
Usages []x509.ExtKeyUsage
+ NotBefore time.Time
}
// AltNames contains the domain names and IP addresses that will be added
@@ -64,6 +65,10 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro
return nil, err
}
serial = new(big.Int).Add(serial, big.NewInt(1))
+ notBefore := now.UTC()
+ if !cfg.NotBefore.IsZero() {
+ notBefore = cfg.NotBefore.UTC()
+ }
tmpl := x509.Certificate{
SerialNumber: serial,
Subject: pkix.Name{
@@ -71,7 +76,7 @@ func NewSelfSignedCACert(cfg Config, key crypto.Signer) (*x509.Certificate, erro
Organization: cfg.Organization,
},
DNSNames: []string{cfg.CommonName},
- NotBefore: now.UTC(),
+ NotBefore: notBefore,
NotAfter: now.Add(duration365d * 10).UTC(),
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
BasicConstraintsValid: true,
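
Taken together with the kubeadm commit above, the new field enables a backdated CA. A hedged sketch (error handling elided; NotBefore is the Config field this commit adds):

package main

import (
	"crypto/ecdsa"
	"crypto/elliptic"
	"crypto/rand"
	"fmt"
	"time"

	certutil "k8s.io/client-go/util/cert"
)

func main() {
	key, _ := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	caCert, _ := certutil.NewSelfSignedCACert(certutil.Config{
		CommonName: "demo-ca",
		// Backdate by 5 minutes, mirroring kubeadm's CertificateBackdate,
		// so a small backward clock jump does not invalidate the CA.
		NotBefore: time.Now().Add(-5 * time.Minute),
	}, key)
	fmt.Println("CA NotBefore:", caCert.NotBefore)
}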
|
77
|
Merge pull request #118102 from RomanBednar/retro-sc-assignment-ga
graduate RetroactiveDefaultStorageClass feature to GA in 1.28
|
2023-08-18
| null | null |
78
|
Merge pull request #118910 from dims/better-url-for-scraping-metrics-from-kubelet
Better URL for scraping metrics from kubelet in node e2e tests
|
2023-08-18
| null | null |
79
|
Merge pull request #118876 from atiratree/fix-explain
kubectl explain should work for both cluster and namespace resources and without a GET method
|
2023-08-18
| null | null |
80
|
put feature gate in alphabetical order
|
2023-08-18
| null |
index 26ea0409fe3..c2190fd2193 100644
--- a/pkg/features/kube_features.go
+++ b/pkg/features/kube_features.go
@@ -606,6 +606,13 @@ const (
// the pod is being deleted due to a disruption.
PodDisruptionConditions featuregate.Feature = "PodDisruptionConditions"
+ // owner: @danielvegamyhre
+ // kep: https://kep.k8s.io/4017
+ // beta: v1.28
+ //
+ // Set pod completion index as a pod label for Indexed Jobs and StatefulSets.
+ PodIndexLabel featuregate.Feature = "PodIndexLabel"
+
// owner: @ddebroy
// alpha: v1.25
//
@@ -843,13 +850,6 @@ const (
//
// Enables In-Place Pod Vertical Scaling
InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
-
- // owner: @danielvegamyhre
- // kep: https://kep.k8s.io/4017
- // beta: v1.28
- //
- // Set pod completion index as a pod label for Indexed Jobs and StatefulSets.
- PodIndexLabel featuregate.Feature = "PodIndexLabel"
)
func init() {
|
81
|
fix a reference to the wrong variable name
Signed-off-by: yanggang <[email protected]>
|
2023-08-18
| null |
index f33d5f320e4..016e493824d 100644
--- a/pkg/controller/util/endpoint/controller_utils.go
+++ b/pkg/controller/util/endpoint/controller_utils.go
@@ -185,7 +185,7 @@ func GetServicesToUpdateOnPodChange(serviceLister v1listers.ServiceLister, old,
if labelsChanged {
oldServices, err := GetPodServiceMemberships(serviceLister, oldPod)
if err != nil {
- utilruntime.HandleError(fmt.Errorf("unable to get pod %s/%s's service memberships: %v", newPod.Namespace, newPod.Name, err))
+ utilruntime.HandleError(fmt.Errorf("unable to get pod %s/%s's service memberships: %v", oldPod.Namespace, oldPod.Name, err))
}
services = determineNeededServiceUpdates(oldServices, services, podChanged)
}
|
82
|
Merge pull request #118909 from dims/bump-to-latest-node-problem-detector-version-with-arm64
Bump to latest node-problem-detector version with arm64
|
2023-08-18
| null | null |
83
|
Merge pull request #118904 from dims/cleanup-pods-at-the-end-in-pod-conditions-e2e-node-test
Cleanup pods at the end in Pod conditions e2e node test
|
2023-08-18
| null | null |
84
|
Merge pull request #118908 from kannon92/fix-feature-flag-order
Order feature gates alphabetically
|
2023-08-18
| null | null |
85
|
Better URL for scraping metrics from kubelet
Signed-off-by: Davanum Srinivas <[email protected]>
|
2023-08-18
| null |
index 0b246f4148d..5643dda6db3 100644
--- a/test/e2e_node/cpu_manager_metrics_test.go
+++ b/test/e2e_node/cpu_manager_metrics_test.go
@@ -164,7 +164,7 @@ var _ = SIGDescribe("CPU Manager Metrics [Serial][Feature:CPUManager]", func() {
func getKubeletMetrics(ctx context.Context) (e2emetrics.KubeletMetrics, error) {
ginkgo.By("getting Kubelet metrics from the metrics API")
- return e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, framework.TestContext.NodeName+":10255", "/metrics")
+ return e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, nodeNameOrIP()+":10255", "/metrics")
}
func makeGuaranteedCPUExclusiveSleeperPod(name string, cpus int) *v1.Pod {
diff --git a/test/e2e_node/podresources_test.go b/test/e2e_node/podresources_test.go
index 3d644868027..430528b765a 100644
--- a/test/e2e_node/podresources_test.go
+++ b/test/e2e_node/podresources_test.go
@@ -965,7 +965,7 @@ func waitForTopologyUnawareResources(ctx context.Context, f *framework.Framework
func getPodResourcesMetrics(ctx context.Context) (e2emetrics.KubeletMetrics, error) {
// we are running out of good names, so we need to be unnecessarily specific to avoid clashes
ginkgo.By("getting Pod Resources metrics from the metrics API")
- return e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, framework.TestContext.NodeName+":10255", "/metrics")
+ return e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, nodeNameOrIP()+":10255", "/metrics")
}
func timelessSampleAtLeast(lower interface{}) types.GomegaMatcher {
diff --git a/test/e2e_node/resource_metrics_test.go b/test/e2e_node/resource_metrics_test.go
index 66d92ef62d1..e81f969a230 100644
--- a/test/e2e_node/resource_metrics_test.go
+++ b/test/e2e_node/resource_metrics_test.go
@@ -131,7 +131,7 @@ var _ = SIGDescribe("ResourceMetricsAPI [NodeFeature:ResourceMetrics]", func() {
func getResourceMetrics(ctx context.Context) (e2emetrics.KubeletMetrics, error) {
ginkgo.By("getting stable resource metrics API")
- return e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, framework.TestContext.NodeName+":10255", "/metrics/resource")
+ return e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, nodeNameOrIP()+":10255", "/metrics/resource")
}
func nodeID(element interface{}) string {
diff --git a/test/e2e_node/util.go b/test/e2e_node/util.go
index 3137edc726c..235d7affd48 100644
--- a/test/e2e_node/util.go
+++ b/test/e2e_node/util.go
@@ -40,6 +40,7 @@ import (
v1 "k8s.io/api/core/v1"
apiequality "k8s.io/apimachinery/pkg/api/equality"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/apimachinery/pkg/util/sets"
utilfeature "k8s.io/apiserver/pkg/util/feature"
@@ -289,7 +290,7 @@ func logKubeletLatencyMetrics(ctx context.Context, metricNames ...string) {
for _, key := range metricNames {
metricSet.Insert(kubeletmetrics.KubeletSubsystem + "_" + key)
}
- metric, err := e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, fmt.Sprintf("%s:%d", framework.TestContext.NodeName, ports.KubeletReadOnlyPort), "/metrics")
+ metric, err := e2emetrics.GrabKubeletMetricsWithoutProxy(ctx, fmt.Sprintf("%s:%d", nodeNameOrIP(), ports.KubeletReadOnlyPort), "/metrics")
if err != nil {
framework.Logf("Error getting kubelet metrics: %v", err)
} else {
@@ -623,3 +624,22 @@ func WaitForPodInitContainerToFail(ctx context.Context, c clientset.Interface, n
return false, nil
})
}
+
+func nodeNameOrIP() string {
+ // Check if the node name in test context can be resolved
+ if ips, err := net.LookupIP(framework.TestContext.NodeName); err != nil {
+ if dnsErr, ok := err.(*net.DNSError); ok && dnsErr.IsNotFound {
+ // if it can't be resolved, pick a host interface
+ if ip, err := utilnet.ChooseHostInterface(); err == nil {
+ return ip.String()
+ }
+ }
+ } else {
+ if len(ips) > 0 {
+ // yay, node name resolved correctly, pick the first
+ return ips[0].String()
+ }
+ }
+ // fallback to node name in test context
+ return framework.TestContext.NodeName
+}
|
86
|
Bump to latest node-problem-detector version with arm64
Signed-off-by: Davanum Srinivas <[email protected]>
|
2023-08-18
| null |
index 492ecabfefe..1c0ef47fc65 100644
--- a/build/dependencies.yaml
+++ b/build/dependencies.yaml
@@ -86,6 +86,21 @@ dependencies:
match: BUNDLED_ETCD_VERSIONS\?|LATEST_ETCD_VERSION\?
- path: cluster/images/etcd/migrate/options.go
+ - name: "node-problem-detector"
+ version: 0.8.13
+ refPaths:
+ - path: test/e2e_node/image_list.go
+ match: const defaultImage
+ # TODO(dims): Ensure newer versions get uploaded to
+ # - https://console.cloud.google.com/storage/browser/gke-release/winnode/node-problem-detector
+ # - https://gcsweb.k8s.io/gcs/kubernetes-release/node-problem-detector/
+ # and then the following references get fixed.
+ #
+ # - path: cluster/gce/gci/configure.sh
+ # match: DEFAULT_NPD_VERSION=
+ #- path: cluster/gce/windows/k8s-node-setup.psm1
+ # match: DEFAULT_NPD_VERSION
+
# From https://github.com/etcd-io/etcd/blob/main/Makefile
- name: "golang: etcd release version"
version: 1.19.9 # https://github.com/etcd-io/etcd/blob/main/CHANGELOG/CHANGELOG-3.5.md
diff --git a/test/e2e_node/image_list.go b/test/e2e_node/image_list.go
index 500e54ed2d4..31d7af10d12 100644
--- a/test/e2e_node/image_list.go
+++ b/test/e2e_node/image_list.go
@@ -106,7 +106,7 @@ func isRunningOnArm64() bool {
}
func getNodeProblemDetectorImage() string {
- const defaultImage string = "registry.k8s.io/node-problem-detector/node-problem-detector:v0.8.7"
+ const defaultImage string = "registry.k8s.io/node-problem-detector/node-problem-detector:v0.8.13"
image := os.Getenv("NODE_PROBLEM_DETECTOR_IMAGE")
if image == "" {
image = defaultImage
|
87
|
add test for apiservices.apiregistration.k8s.io discoverability in openapi/v3
|
2023-08-18
| null |
index de809d5bead..5ffed17913f 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator_test.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator_test.go
@@ -23,15 +23,20 @@ import (
"strings"
"testing"
+ "github.com/emicklei/go-restful/v3"
"github.com/stretchr/testify/assert"
+ "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apiserver/pkg/endpoints/metrics"
+ openapinamer "k8s.io/apiserver/pkg/endpoints/openapi"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/mux"
"k8s.io/component-base/metrics/legacyregistry"
"k8s.io/component-base/metrics/testutil"
v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
+ openapicommon "k8s.io/kube-openapi/pkg/common"
"k8s.io/kube-openapi/pkg/handler3"
+ kubeopenapispec "k8s.io/kube-openapi/pkg/validation/spec"
)
type testV3APIService struct {
@@ -170,6 +175,54 @@ func TestV3APIService(t *testing.T) {
assert.ElementsMatch(t, []string{openAPIV2Converter, apiService.Name}, apiServiceNames)
}
+func TestV3RootAPIService(t *testing.T) {
+ ws := new(restful.WebService)
+ {
+ ws.Path("/apis/apiregistration.k8s.io/v1")
+ ws.Doc("API at/apis/apiregistration.k8s.io/v1 ")
+ ws.Consumes("*/*")
+ ws.Produces("application/json")
+ ws.ApiVersion("apiregistration.k8s.io/v1")
+ routeBuilder := ws.GET("apiservices").
+ To(func(request *restful.Request, response *restful.Response) {}).
+ Doc("list or watch objects of kind APIService").
+ Operation("listAPIService").
+ Produces("application/json").
+ Returns(http.StatusOK, "OK", v1.APIService{}).
+ Writes(v1.APIService{})
+ ws.Route(routeBuilder)
+ }
+ openapiConfig := genericapiserver.DefaultOpenAPIV3Config(getTestAPIServiceOpenAPIDefinitions, openapinamer.NewDefinitionNamer(runtime.NewScheme()))
+
+ downloader := Downloader{}
+ goRestfulContainer := restful.NewContainer()
+ goRestfulContainer.Add(ws)
+ pathHandler := mux.NewPathRecorderMux("aggregator_test")
+ var serveHandler http.Handler = pathHandler
+ specProxier, err := BuildAndRegisterAggregator(downloader, genericapiserver.NewEmptyDelegate(), goRestfulContainer, openapiConfig, pathHandler)
+ if err != nil {
+ t.Error(err)
+ }
+ expectedSpecJSON := []byte(`{"openapi":"3.0.0","info":{"title":"Generic API Server"},"paths":{"/apis/apiregistration.k8s.io/v1/apiservices":{"get":{"tags":["apiregistration_v1"],"description":"list or watch objects of kind APIService","operationId":"listApiregistrationV1APIService","responses":{"200":{"description":"OK","content":{"application/json":{"schema":{"$ref":"#/components/schemas/io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService"}}}}}}}},"components":{"schemas":{"io.k8s.kube-aggregator.pkg.apis.apiregistration.v1.APIService":{"description":"APIService represents a server for a particular GroupVersion. Name must be \"version.group\".","type":"object"}}}}`)
+
+ data := sendReq(t, serveHandler, "/openapi/v3")
+ groupVersionList := handler3.OpenAPIV3Discovery{}
+ if err := json.Unmarshal(data, &groupVersionList); err != nil {
+ t.Fatal(err)
+ }
+ path, ok := groupVersionList.Paths["apis/apiregistration.k8s.io/v1"]
+ if !ok {
+ t.Error("Expected apiregistration.k8s.io/v1 to be in group version list")
+ }
+ gotSpecJSON := sendReq(t, serveHandler, path.ServerRelativeURL)
+ if bytes.Compare(gotSpecJSON, expectedSpecJSON) != 0 {
+ t.Errorf("Spec mismatch, expected %s, got %s", expectedSpecJSON, gotSpecJSON)
+ }
+
+ apiServiceNames := specProxier.GetAPIServiceNames()
+ assert.ElementsMatch(t, []string{"k8s_internal_local_kube_aggregator_types", openAPIV2Converter}, apiServiceNames)
+}
+
func TestOpenAPIRequestMetrics(t *testing.T) {
metrics.Register()
metrics.Reset()
@@ -239,3 +292,20 @@ func sendReq(t *testing.T, handler http.Handler, path string) []byte {
handler.ServeHTTP(writer, req)
return writer.data
}
+
+func getTestAPIServiceOpenAPIDefinitions(_ openapicommon.ReferenceCallback) map[string]openapicommon.OpenAPIDefinition {
+ return map[string]openapicommon.OpenAPIDefinition{
+ "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1.APIService": buildTestAPIServiceOpenAPIDefinition(),
+ }
+}
+
+func buildTestAPIServiceOpenAPIDefinition() openapicommon.OpenAPIDefinition {
+ return openapicommon.OpenAPIDefinition{
+ Schema: kubeopenapispec.Schema{
+ SchemaProps: kubeopenapispec.SchemaProps{
+ Description: "APIService represents a server for a particular GroupVersion. Name must be \\"version.group\\".",
+ Type: []string{"object"},
+ },
+ },
+ }
+}
|
89
|
expose apiregistration.k8s.io into OpenAPIV3
|
2023-08-18
| null |
index cf83c3a6fe5..bc4da3a8946 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/apiserver/apiserver.go
@@ -440,6 +440,8 @@ func (s *APIAggregator) PrepareRun() (preparedAPIAggregator, error) {
openAPIV3Aggregator, err := openapiv3aggregator.BuildAndRegisterAggregator(
specDownloaderV3,
s.GenericAPIServer.NextDelegate(),
+ s.GenericAPIServer.Handler.GoRestfulContainer,
+ s.openAPIConfig,
s.GenericAPIServer.Handler.NonGoRestfulMux)
if err != nil {
return preparedAPIAggregator{}, err
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator.go
index 2d79554f756..f0774cc5f53 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator.go
@@ -25,9 +25,13 @@ import (
"sync"
"time"
+ "github.com/emicklei/go-restful/v3"
+
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apiserver/pkg/endpoints/metrics"
"k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/mux"
+ "k8s.io/apiserver/pkg/server/routes"
"k8s.io/klog/v2"
v1 "k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
"k8s.io/kube-openapi/pkg/common"
@@ -73,12 +77,29 @@ func (s *specProxier) GetAPIServiceNames() []string {
}
// BuildAndRegisterAggregator registers the OpenAPI aggregator handler. This function is not thread safe as it is only called on startup.
-func BuildAndRegisterAggregator(downloader Downloader, delegationTarget server.DelegationTarget, pathHandler common.PathHandlerByGroupVersion) (SpecProxier, error) {
+func BuildAndRegisterAggregator(downloader Downloader, delegationTarget server.DelegationTarget, aggregatorService *restful.Container, openAPIConfig *common.Config, pathHandler common.PathHandlerByGroupVersion) (SpecProxier, error) {
s := &specProxier{
apiServiceInfo: map[string]*openAPIV3APIServiceInfo{},
downloader: downloader,
}
+ if aggregatorService != nil && openAPIConfig != nil {
+ // Make native types exposed by aggregator available to the aggregated
+ // OpenAPI (normal handle is disabled by skipOpenAPIInstallation option)
+ aggregatorLocalServiceName := "k8s_internal_local_kube_aggregator_types"
+ v3Mux := mux.NewPathRecorderMux(aggregatorLocalServiceName)
+ _ = routes.OpenAPI{
+ Config: openAPIConfig,
+ }.InstallV3(aggregatorService, v3Mux)
+
+ s.AddUpdateAPIService(v3Mux, &v1.APIService{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: aggregatorLocalServiceName,
+ },
+ })
+ s.UpdateAPIServiceSpec(aggregatorLocalServiceName)
+ }
+
i := 1
for delegate := delegationTarget; delegate != nil; delegate = delegate.NextDelegate() {
handler := delegate.UnprotectedHandler()
diff --git a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator_test.go b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator_test.go
index 69b3caf392f..de809d5bead 100644
--- a/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator_test.go
+++ b/staging/src/k8s.io/kube-aggregator/pkg/controllers/openapiv3/aggregator/aggregator_test.go
@@ -89,7 +89,7 @@ func TestV2APIService(t *testing.T) {
downloader := Downloader{}
pathHandler := mux.NewPathRecorderMux("aggregator_test")
var serveHandler http.Handler = pathHandler
- specProxier, err := BuildAndRegisterAggregator(downloader, genericapiserver.NewEmptyDelegate(), pathHandler)
+ specProxier, err := BuildAndRegisterAggregator(downloader, genericapiserver.NewEmptyDelegate(), nil, nil, pathHandler)
if err != nil {
t.Error(err)
}
@@ -133,7 +133,7 @@ func TestV3APIService(t *testing.T) {
pathHandler := mux.NewPathRecorderMux("aggregator_test")
var serveHandler http.Handler = pathHandler
- specProxier, err := BuildAndRegisterAggregator(downloader, genericapiserver.NewEmptyDelegate(), pathHandler)
+ specProxier, err := BuildAndRegisterAggregator(downloader, genericapiserver.NewEmptyDelegate(), nil, nil, pathHandler)
if err != nil {
t.Error(err)
}
@@ -178,7 +178,7 @@ func TestOpenAPIRequestMetrics(t *testing.T) {
pathHandler := mux.NewPathRecorderMux("aggregator_metrics_test")
var serveHandler http.Handler = pathHandler
- specProxier, err := BuildAndRegisterAggregator(downloader, genericapiserver.NewEmptyDelegate(), pathHandler)
+ specProxier, err := BuildAndRegisterAggregator(downloader, genericapiserver.NewEmptyDelegate(), nil, nil, pathHandler)
if err != nil {
t.Error(err)
}
|
90
|
feature flags should be ordered alphabetically
|
2023-08-18
| null |
index 3827a55e388..05280321a6c 100644
--- a/pkg/features/kube_features.go
+++ b/pkg/features/kube_features.go
@@ -374,15 +374,6 @@ const (
// Causes kubelet to no longer create legacy IPTables rules
IPTablesOwnershipCleanup featuregate.Feature = "IPTablesOwnershipCleanup"
- // owner: @mimowo
- // kep: https://kep.k8s.io/3329
- // alpha: v1.25
- // beta: v1.26
- //
- // Allow users to specify handling of pod failures based on container exit codes
- // and pod conditions.
- JobPodFailurePolicy featuregate.Feature = "JobPodFailurePolicy"
-
// owner: @ahg
// beta: v1.23
// stable: v1.27
@@ -392,6 +383,15 @@ const (
// that have never been unsuspended before.
JobMutableNodeSchedulingDirectives featuregate.Feature = "JobMutableNodeSchedulingDirectives"
+ // owner: @mimowo
+ // kep: https://kep.k8s.io/3329
+ // alpha: v1.25
+ // beta: v1.26
+ //
+ // Allow users to specify handling of pod failures based on container exit codes
+ // and pod conditions.
+ JobPodFailurePolicy featuregate.Feature = "JobPodFailurePolicy"
+
// owner: @alculquicondor
// alpha: v1.23
// beta: v1.24
@@ -948,10 +948,10 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
IPTablesOwnershipCleanup: {Default: true, PreRelease: featuregate.Beta},
- JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta},
-
JobMutableNodeSchedulingDirectives: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.29
+ JobPodFailurePolicy: {Default: true, PreRelease: featuregate.Beta},
+
JobReadyPods: {Default: true, PreRelease: featuregate.Beta},
JobTrackingWithFinalizers: {Default: true, PreRelease: featuregate.GA, LockToDefault: true}, // remove in 1.28
|
92
|
add feature gate
|
2023-08-18
| null |
index cce20c08b5f..fa76c7ccff5 100644
--- a/pkg/controller/job/job_controller.go
+++ b/pkg/controller/job/job_controller.go
@@ -1482,7 +1482,10 @@ func (jm *Controller) manageJob(ctx context.Context, job *batch.Job, activePods
if completionIndex != unknownCompletionIndex {
template = podTemplate.DeepCopy()
addCompletionIndexAnnotation(template, completionIndex)
- addCompletionIndexLabel(template, completionIndex)
+
+ if feature.DefaultFeatureGate.Enabled(features.PodIndexLabel) {
+ addCompletionIndexLabel(template, completionIndex)
+ }
template.Spec.Hostname = fmt.Sprintf("%s-%d", job.Name, completionIndex)
generateName = podGenerateNameWithIndex(job.Name, completionIndex)
}
diff --git a/pkg/controller/job/job_controller_test.go b/pkg/controller/job/job_controller_test.go
index 8087c16e8d2..a0608acbad3 100644
--- a/pkg/controller/job/job_controller_test.go
+++ b/pkg/controller/job/job_controller_test.go
@@ -962,7 +962,9 @@ func checkIndexedJobPods(t *testing.T, control *controller.FakePodControl, wantI
gotIndexes := sets.New[int]()
for _, p := range control.Templates {
checkJobCompletionEnvVariable(t, &p.Spec)
- checkJobCompletionLabel(t, &p)
+ if feature.DefaultFeatureGate.Enabled(features.PodIndexLabel) {
+ checkJobCompletionLabel(t, &p)
+ }
ix := getCompletionIndex(p.Annotations)
if ix == -1 {
t.Errorf("Created pod %s didn't have completion index", p.Name)
@@ -4395,6 +4397,7 @@ func TestFinalizersRemovedExpectations(t *testing.T) {
t.Errorf("Timeout waiting for expectations (-want, +got):\\n%s", diff)
}
}
+
func checkJobCompletionLabel(t *testing.T, p *v1.PodTemplateSpec) {
t.Helper()
labels := p.GetLabels()
diff --git a/pkg/features/kube_features.go b/pkg/features/kube_features.go
index 3827a55e388..26ea0409fe3 100644
--- a/pkg/features/kube_features.go
+++ b/pkg/features/kube_features.go
@@ -843,6 +843,13 @@ const (
//
// Enables In-Place Pod Vertical Scaling
InPlacePodVerticalScaling featuregate.Feature = "InPlacePodVerticalScaling"
+
+ // owner: @danielvegamyhre
+ // kep: https://kep.k8s.io/4017
+ // beta: v1.28
+ //
+ // Set pod completion index as a pod label for Indexed Jobs and StatefulSets.
+ PodIndexLabel featuregate.Feature = "PodIndexLabel"
)
func init() {
@@ -1072,6 +1079,8 @@ var defaultKubernetesFeatureGates = map[featuregate.Feature]featuregate.FeatureS
InPlacePodVerticalScaling: {Default: false, PreRelease: featuregate.Alpha},
+ PodIndexLabel: {Default: true, PreRelease: featuregate.Beta},
+
// inherited features from generic apiserver, relisted here to get a conflict if it is changed
// unintentionally on either side:
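
A minimal sketch of the gate-check pattern manageJob now uses; the label key below is illustrative, not the real completion-index label:

package demo

import (
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
)

// maybeAddIndexLabel applies the gated behavior only while PodIndexLabel
// (beta, on by default in v1.28) is enabled.
func maybeAddIndexLabel(labels map[string]string, index string) {
	if utilfeature.DefaultFeatureGate.Enabled(features.PodIndexLabel) {
		labels["example.io/pod-index"] = index // hypothetical key
	}
}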
|
93
|
Merge pull request #118786 from pohly/dra-test-skip-prepare
dra: kubelet must skip NodePrepareResource if not used by any container
|
2023-08-18
| null | null |
95
|
Merge pull request #118903 from champtar/revert-118631-ca-not-before
Revert "Make CA valid 1 hour in the past"
|
2023-08-18
| null | null |
96
|
Merge pull request #118901 from dims/set-aws-specific-credential-provider-when-running-there
Set AWS specific credential provider when running there
|
2023-08-18
| null | null |
97
|
Merge pull request #118866 from neolit123/1.28-add-v1beta4-to-scheme
kubeadm: add v1beta4 to scheme; add --allow-experimental-api flag
|
2023-08-18
| null | null |
99
|
kubectl explain should work for both cluster and namespace resources and without a GET method
|
2023-08-18
| null |
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "delete": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "delete a Job",
+ "operationId": "deleteBatchV1NamespacedJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "gracePeriodSeconds",
+ "in": "query",
+ "description": "The duration in seconds before the object should be deleted. Value must be non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per object value if not specified. zero means delete immediately.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "orphanDependents",
+ "in": "query",
+ "description": "Deprecated: please use the PropagationPolicy, this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \\"orphan\\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "propagationPolicy",
+ "in": "query",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "patch": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "partially update the specified Job",
+ "operationId": "patchBatchV1NamespacedJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "force",
+ "in": "query",
+ "description": "Force is going to \\"force\\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "application/apply-patch+yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/json-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/strategic-merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "name",
+ "in": "path",
+ "description": "name of the Job",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "read status of the specified Job",
+ "operationId": "readBatchV1NamespacedJobStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "put": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "replace status of the specified Job",
+ "operationId": "replaceBatchV1NamespacedJobStatus",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "patch": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "partially update status of the specified Job",
+ "operationId": "patchBatchV1NamespacedJobStatus",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "force",
+ "in": "query",
+ "description": "Force is going to \\"force\\" Apply requests. It means user will re-acquire conflicting fields owned by other people. Force flag must be unset for non-apply patch requests.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "application/apply-patch+yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/json-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/strategic-merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "name",
+ "in": "path",
+ "description": "name of the Job",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/watch/cronjobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "watch individual changes to a list of CronJob. deprecated: use the 'watch' parameter with a list operation instead.",
+ "operationId": "watchBatchV1CronJobListForAllNamespaces",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/watch/jobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "watch individual changes to a list of Job. deprecated: use the 'watch' parameter with a list operation instead.",
+ "operationId": "watchBatchV1JobListForAllNamespaces",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/watch/namespaces/{namespace}/cronjobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "watch individual changes to a list of CronJob. deprecated: use the 'watch' parameter with a list operation instead.",
+ "operationId": "watchBatchV1NamespacedCronJobList",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/watch/namespaces/{namespace}/cronjobs/{name}": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "watch changes to an object of kind CronJob. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
+ "operationId": "watchBatchV1NamespacedCronJob",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "watch",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "name",
+ "in": "path",
+ "description": "name of the CronJob",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/watch/namespaces/{namespace}/jobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "watch individual changes to a list of Job. deprecated: use the 'watch' parameter with a list operation instead.",
+ "operationId": "watchBatchV1NamespacedJobList",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/watch/namespaces/{namespace}/jobs/{name}": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "watch changes to an object of kind Job. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
+ "operationId": "watchBatchV1NamespacedJob",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "watch",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "name",
+ "in": "path",
+ "description": "name of the Job",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is sent when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is sent when the state is synced at least to the moment\\n when the request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n an Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ }
+ },
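The list endpoints whose limit/continue parameters are documented in the path items above are paginated. A hedged sketch of draining such a paginated list with client-go, using the same imports and *kubernetes.Clientset as the earlier watchJobs sketch:

func listAllJobs(ctx context.Context, cs *kubernetes.Clientset) ([]batchv1.Job, error) {
	var all []batchv1.Job
	opts := metav1.ListOptions{Limit: 100} // server returns a continue token when more items remain
	for {
		list, err := cs.BatchV1().Jobs("default").List(ctx, opts)
		if err != nil {
			return nil, err
		}
		all = append(all, list.Items...)
		if list.Continue == "" { // empty continue field => no more results
			return all, nil
		}
		opts.Continue = list.Continue // resume from the "next key"
	}
}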
+ "components": {
+ "schemas": {
+ "io.k8s.api.batch.v1.CronJob": {
+ "description": "CronJob represents the configuration of a single cron job.",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ }
+ ]
+ },
+ "spec": {
+ "description": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobSpec"
+ }
+ ]
+ },
+ "status": {
+ "description": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobStatus"
+ }
+ ]
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "batch",
+ "kind": "CronJob",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.api.batch.v1.CronJobList": {
+ "description": "CronJobList is a collection of cron jobs.",
+ "type": "object",
+ "required": [
+ "items"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "description": "items is the list of CronJobs.",
+ "type": "array",
+ "items": {
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ ]
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ ]
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "batch",
+ "kind": "CronJobList",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.api.batch.v1.CronJobSpec": {
+ "description": "CronJobSpec describes what the job execution will look like and when it will actually run.",
+ "type": "object",
+ "required": [
+ "schedule",
+ "jobTemplate"
+ ],
+ "properties": {
+ "concurrencyPolicy": {
+ "description": "Specifies how to treat concurrent executions of a Job. Valid values are:\\n\\n- \\"Allow\\" (default): allows CronJobs to run concurrently; - \\"Forbid\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\"Replace\\": cancels currently running job and replaces it with a new one\\n\\nPossible enum values:\\n - `\\"Allow\\"` allows CronJobs to run concurrently.\\n - `\\"Forbid\\"` forbids concurrent runs, skipping next run if previous hasn't finished yet.\\n - `\\"Replace\\"` cancels currently running job and replaces it with a new one.",
+ "type": "string",
+ "enum": [
+ "Allow",
+ "Forbid",
+ "Replace"
+ ]
+ },
+ "failedJobsHistoryLimit": {
+ "description": "The number of failed finished jobs to retain. Value must be a non-negative integer. Defaults to 1.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "jobTemplate": {
+ "description": "Specifies the job that will be created when executing a CronJob.",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobTemplateSpec"
+ }
+ ]
+ },
+ "schedule": {
+ "description": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.",
+ "type": "string",
+ "default": ""
+ },
+ "startingDeadlineSeconds": {
+ "description": "Optional deadline in seconds for starting the job if it misses its scheduled time for any reason. Missed job executions will be counted as failed ones.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "successfulJobsHistoryLimit": {
+ "description": "The number of successful finished jobs to retain. Value must be a non-negative integer. Defaults to 3.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "suspend": {
+ "description": "This flag tells the controller to suspend subsequent executions; it does not apply to already started executions. Defaults to false.",
+ "type": "boolean"
+ },
+ "timeZone": {
+ "description": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found, a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones",
+ "type": "string"
+ }
+ }
+ },
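A small sketch (illustrative; names such as "hello" and "busybox" are placeholders) of how the required CronJobSpec fields above, schedule and jobTemplate, map onto the Go types in k8s.io/api/batch/v1:

import (
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

func exampleCronJob() *batchv1.CronJob {
	return &batchv1.CronJob{
		ObjectMeta: metav1.ObjectMeta{Name: "hello"},
		Spec: batchv1.CronJobSpec{
			Schedule:          "*/5 * * * *",            // required: Cron format
			ConcurrencyPolicy: batchv1.ForbidConcurrent, // the "Forbid" enum value above
			JobTemplate: batchv1.JobTemplateSpec{ // required
				Spec: batchv1.JobSpec{
					Template: corev1.PodTemplateSpec{
						Spec: corev1.PodSpec{
							RestartPolicy: corev1.RestartPolicyNever,
							Containers: []corev1.Container{{
								Name:  "hello",
								Image: "busybox",
							}},
						},
					},
				},
			},
		},
	}
}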
+ "io.k8s.api.batch.v1.CronJobStatus": {
+ "description": "CronJobStatus represents the current state of a cron job.",
+ "type": "object",
+ "properties": {
+ "active": {
+ "description": "A list of pointers to currently running jobs.",
+ "type": "array",
+ "items": {
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.core.v1.ObjectReference"
+ }
+ ]
+ },
+ "x-kubernetes-list-type": "atomic"
+ },
+ "lastScheduleTime": {
+ "description": "Information about when the job was last successfully scheduled.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ ]
+ },
+ "lastSuccessfulTime": {
+ "description": "Information about when the job last successfully completed.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ ]
+ }
+ }
+ },
+ "io.k8s.api.batch.v1.Job": {
+ "description": "Job represents the configuration of a single job.",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ }
+ ]
+ },
+ "spec": {
+ "description": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobSpec"
+ }
+ ]
+ },
+ "status": {
+ "description": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobStatus"
+ }
+ ]
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "batch",
+ "kind": "Job",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.api.batch.v1.JobCondition": {
+ "description": "JobCondition describes the current state of a job.",
+ "type": "object",
+ "required": [
+ "type",
+ "status"
+ ],
+ "properties": {
+ "lastProbeTime": {
+ "description": "Last time the condition was checked.",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ ]
+ },
+ "lastTransitionTime": {
+ "description": "Last time the condition transitioned from one status to another.",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ ]
+ },
+ "message": {
+ "description": "Human-readable message indicating details about the last transition.",
+ "type": "string"
+ },
+ "reason": {
+ "description": "(brief) reason for the condition's last transition.",
+ "type": "string"
+ },
+ "status": {
+ "description": "Status of the condition, one of True, False, Unknown.",
+ "type": "string",
+ "default": ""
+ },
+ "type": {
+ "description": "Type of job condition, Complete or Failed.",
+ "type": "string",
+ "default": ""
+ }
+ }
+ },
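A sketch (illustrative, not part of the spec) of interpreting the JobCondition schema above: a Job has finished when a condition of type "Complete" or "Failed" reports status "True". It reuses the batchv1 and corev1 imports from the earlier sketches.

func jobFinished(job *batchv1.Job) (finished, succeeded bool) {
	for _, c := range job.Status.Conditions {
		if (c.Type == batchv1.JobComplete || c.Type == batchv1.JobFailed) &&
			c.Status == corev1.ConditionTrue {
			return true, c.Type == batchv1.JobComplete
		}
	}
	return false, false
}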
+ "io.k8s.api.batch.v1.JobList": {
+ "description": "JobList is a collection of jobs.",
+ "type": "object",
+ "required": [
+ "items"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "description": "items is the list of Jobs.",
+ "type": "array",
+ "items": {
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ ]
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ ]
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "batch",
+ "kind": "JobList",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.api.batch.v1.JobSpec": {
|
100
|
kubectl explain should work for both cluster and namespace resources and without a GET method
|
2023-08-18
| null |
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "delete": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "delete a Job",
+ "operationId": "deleteBatchV1NamespacedJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "gracePeriodSeconds",
+ "in": "query",
+ "description": "The duration in seconds before the object should be deleted. Value must be a non-negative integer. The value zero indicates delete immediately. If this value is nil, the default grace period for the specified type will be used. Defaults to a per-object value if not specified.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "orphanDependents",
+ "in": "query",
+ "description": "Deprecated: please use the PropagationPolicy; this field will be deprecated in 1.7. Should the dependent objects be orphaned. If true/false, the \\"orphan\\" finalizer will be added to/removed from the object's finalizers list. Either this field or PropagationPolicy may be set, but not both.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "propagationPolicy",
+ "in": "query",
+ "description": "Whether and how garbage collection will be performed. Either this field or OrphanDependents may be set, but not both. The default policy is decided by the existing finalizer set in the metadata.finalizers and the resource-specific default policy. Acceptable values are: 'Orphan' - orphan the dependents; 'Background' - allow the garbage collector to delete the dependents in the background; 'Foreground' - a cascading policy that deletes all dependents in the foreground.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.DeleteOptions"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ }
+ },
+ "202": {
+ "description": "Accepted",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Status"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "delete",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "patch": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "partially update the specified Job",
+ "operationId": "patchBatchV1NamespacedJob",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "force",
+ "in": "query",
+ "description": "Force is going to \\"force\\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The Force flag must be unset for non-apply patch requests.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "application/apply-patch+yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/json-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/strategic-merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "name",
+ "in": "path",
+ "description": "name of the Job",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
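An illustrative client-go sketch of the patch operation above, sending a strategic merge patch with the fieldManager and fieldValidation parameters; "example-manager" and "my-job" are placeholders. It reuses the imports and *kubernetes.Clientset from the earlier sketches, plus "k8s.io/apimachinery/pkg/types".

func suspendJob(ctx context.Context, cs *kubernetes.Clientset) error {
	patch := []byte(`{"spec":{"suspend":true}}`)
	_, err := cs.BatchV1().Jobs("default").Patch(ctx, "my-job",
		types.StrategicMergePatchType, patch, metav1.PatchOptions{
			FieldManager:    "example-manager", // placeholder manager name
			FieldValidation: "Strict",          // reject unknown or duplicate fields
		})
	return err
}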
+ "/apis/batch/v1/namespaces/{namespace}/jobs/{name}/status": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "read status of the specified Job",
+ "operationId": "readBatchV1NamespacedJobStatus",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "get",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "put": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "replace status of the specified Job",
+ "operationId": "replaceBatchV1NamespacedJobStatus",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "*/*": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "put",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "patch": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "partially update status of the specified Job",
+ "operationId": "patchBatchV1NamespacedJobStatus",
+ "parameters": [
+ {
+ "name": "dryRun",
+ "in": "query",
+ "description": "When present, indicates that modifications should not be persisted. An invalid or unrecognized dryRun directive will result in an error response and no further processing of the request. Valid values are: - All: all dry run stages will be processed",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldManager",
+ "in": "query",
+ "description": "fieldManager is a name associated with the actor or entity that is making these changes. The value must be no more than 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. This field is required for apply requests (application/apply-patch) but optional for non-apply patch types (JsonPatch, MergePatch, StrategicMergePatch).",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldValidation",
+ "in": "query",
+ "description": "fieldValidation instructs the server on how to handle objects in the request (POST/PUT/PATCH) containing unknown or duplicate fields. Valid values are: - Ignore: This will ignore any unknown fields that are silently dropped from the object, and will ignore all but the last duplicate field that the decoder encounters. This is the default behavior prior to v1.23. - Warn: This will send a warning via the standard warning response header for each unknown field that is dropped from the object, and for each duplicate field that is encountered. The request will still succeed if there are no other errors, and will only persist the last of any duplicate fields. This is the default in v1.23+ - Strict: This will fail the request with a BadRequest error if any unknown fields would be dropped from the object, or if any duplicate fields are present. The error returned from the server will contain all unknown and duplicate fields encountered.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "force",
+ "in": "query",
+ "description": "Force is going to \\"force\\" Apply requests. It means the user will re-acquire conflicting fields owned by other people. The Force flag must be unset for non-apply patch requests.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ],
+ "requestBody": {
+ "content": {
+ "application/apply-patch+yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/json-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ },
+ "application/strategic-merge-patch+json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Patch"
+ }
+ }
+ }
+ },
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "201": {
+ "description": "Created",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "patch",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "name",
+ "in": "path",
+ "description": "name of the Job",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/watch/cronjobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "watch individual changes to a list of CronJob. Deprecated: use the 'watch' parameter with a list operation instead.",
+ "operationId": "watchBatchV1CronJobListForAllNamespaces",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is sent when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is sent when the state is synced at least to the moment\\n when the request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n an Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/watch/jobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "watch individual changes to a list of Job. Deprecated: use the 'watch' parameter with a list operation instead.",
+ "operationId": "watchBatchV1JobListForAllNamespaces",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested number of items (up to zero items) in the event all requested objects are filtered out, and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure it sees all possible objects. If objects are updated during a chunked list, the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with the `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen the `sendInitialEvents` option is set, we require the `resourceVersionMatch` option to also be set. The semantics of the watch request are as follows: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is sent when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is sent when the state is synced at least to the moment\\n when the request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n an Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
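A hedged sketch of starting the watch-list flow described by the sendInitialEvents parameter above. The SendInitialEvents and ResourceVersionMatch fields exist on client-go's metav1.ListOptions as of Kubernetes 1.27, but server support may be feature-gated; the sketch reuses the imports from the first watchJobs example, and the returned watch.Interface can be consumed exactly as shown there.

func watchAllJobs(ctx context.Context, cs *kubernetes.Clientset) (watch.Interface, error) {
	sendInitial := true
	return cs.BatchV1().Jobs(metav1.NamespaceAll).Watch(ctx, metav1.ListOptions{
		AllowWatchBookmarks:  true, // needed to receive the "initial-events-end" bookmark
		SendInitialEvents:    &sendInitial,
		ResourceVersionMatch: metav1.ResourceVersionMatchNotOlderThan,
		// ResourceVersion left unset => "consistent read" semantics per the description above.
	})
}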
+ "/apis/batch/v1/watch/namespaces/{namespace}/cronjobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "watch individual changes to a list of CronJob. Deprecated: use the 'watch' parameter with a list operation instead.",
+ "operationId": "watchBatchV1NamespacedCronJobList",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart its list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error; the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent with the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/watch/namespaces/{namespace}/cronjobs/{name}": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "watch changes to an object of kind CronJob. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
+ "operationId": "watchBatchV1NamespacedCronJob",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "watch",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "CronJob"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "name",
+ "in": "path",
+ "description": "name of the CronJob",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/watch/namespaces/{namespace}/jobs": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "watch individual changes to a list of Job. deprecated: use the 'watch' parameter with a list operation instead.",
+ "operationId": "watchBatchV1NamespacedJobList",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "watchlist",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ },
+ "/apis/batch/v1/watch/namespaces/{namespace}/jobs/{name}": {
+ "get": {
+ "tags": [
+ "batch_v1"
+ ],
+ "description": "watch changes to an object of kind Job. deprecated: use the 'watch' parameter with a list operation instead, filtered to a single item with the 'fieldSelector' parameter.",
+ "operationId": "watchBatchV1NamespacedJob",
+ "responses": {
+ "200": {
+ "description": "OK",
+ "content": {
+ "application/json": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/json;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/vnd.kubernetes.protobuf;stream=watch": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ },
+ "application/yaml": {
+ "schema": {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.WatchEvent"
+ }
+ }
+ }
+ },
+ "401": {
+ "description": "Unauthorized"
+ }
+ },
+ "x-kubernetes-action": "watch",
+ "x-kubernetes-group-version-kind": {
+ "group": "batch",
+ "version": "v1",
+ "kind": "Job"
+ }
+ },
+ "parameters": [
+ {
+ "name": "allowWatchBookmarks",
+ "in": "query",
+ "description": "allowWatchBookmarks requests watch events with type \\"BOOKMARK\\". Servers that do not implement bookmarks may ignore this flag and bookmarks are sent at the server's discretion. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. If this is not a watch, this field is ignored.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "continue",
+ "in": "query",
+ "description": "The continue option should be set when retrieving more results from the server. Since this value is server defined, clients may only use the continue value from a previous query result with identical query parameters (except for the value of continue) and the server may reject a continue value it does not recognize. If the specified continue value is no longer valid whether due to expiration (generally five to fifteen minutes) or a configuration change on the server, the server will respond with a 410 ResourceExpired error together with a continue token. If the client needs a consistent list, it must restart their list without the continue field. Otherwise, the client may send another list request with the token received with the 410 error, the server will respond with a list starting from the next key, but from the latest snapshot, which is inconsistent from the previous list results - objects that are created, modified, or deleted after the first list request will be included in the response, as long as their keys are after the \\"next key\\".\\n\\nThis field is not supported when watch is true. Clients may start a watch from the last resourceVersion value returned by the server and not miss any modifications.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "fieldSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their fields. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "labelSelector",
+ "in": "query",
+ "description": "A selector to restrict the list of returned objects by their labels. Defaults to everything.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "limit",
+ "in": "query",
+ "description": "limit is a maximum number of responses to return for a list call. If more items exist, the server will set the `continue` field on the list metadata to a value that can be used with the same initial query to retrieve the next set of results. Setting a limit may return fewer than the requested amount of items (up to zero items) in the event all requested objects are filtered out and clients should only use the presence of the continue field to determine whether more results are available. Servers may choose not to support the limit argument and will return all of the available results. If limit is specified and the continue field is empty, clients may assume that no more results are available. This field is not supported if watch is true.\\n\\nThe server guarantees that the objects returned when using continue will be identical to issuing a single list call without a limit - that is, no objects created, modified, or deleted after the first request is issued will be included in any subsequent continued requests. This is sometimes referred to as a consistent snapshot, and ensures that a client that is using limit to receive smaller chunks of a very large result can ensure they see all possible objects. If objects are updated during a chunked list the version of the object that was present at the time the first list result was calculated is returned.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "name",
+ "in": "path",
+ "description": "name of the Job",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "namespace",
+ "in": "path",
+ "description": "object name and auth scope, such as for teams and projects",
+ "required": true,
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "pretty",
+ "in": "query",
+ "description": "If 'true', then the output is pretty printed.",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersion",
+ "in": "query",
+ "description": "resourceVersion sets a constraint on what resource versions a request may be served from. See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "resourceVersionMatch",
+ "in": "query",
+ "description": "resourceVersionMatch determines how resourceVersion is applied to list calls. It is highly recommended that resourceVersionMatch be set for list calls where resourceVersion is set See https://kubernetes.io/docs/reference/using-api/api-concepts/#resource-versions for details.\\n\\nDefaults to unset",
+ "schema": {
+ "type": "string",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "sendInitialEvents",
+ "in": "query",
+ "description": "`sendInitialEvents=true` may be set together with `watch=true`. In that case, the watch stream will begin with synthetic events to produce the current state of objects in the collection. Once all such events have been sent, a synthetic \\"Bookmark\\" event will be sent. The bookmark will report the ResourceVersion (RV) corresponding to the set of objects, and be marked with `\\"k8s.io/initial-events-end\\": \\"true\\"` annotation. Afterwards, the watch stream will proceed as usual, sending watch events corresponding to changes (subsequent to the RV) to objects watched.\\n\\nWhen `sendInitialEvents` option is set, we require `resourceVersionMatch` option to also be set. The semantic of the watch request is as following: - `resourceVersionMatch` = NotOlderThan\\n is interpreted as \\"data at least as new as the provided `resourceVersion`\\"\\n and the bookmark event is send when the state is synced\\n to a `resourceVersion` at least as fresh as the one provided by the ListOptions.\\n If `resourceVersion` is unset, this is interpreted as \\"consistent read\\" and the\\n bookmark event is send when the state is synced at least to the moment\\n when request started being processed.\\n- `resourceVersionMatch` set to any other value or unset\\n Invalid error is returned.\\n\\nDefaults to true if `resourceVersion=\\"\\"` or `resourceVersion=\\"0\\"` (for backward compatibility reasons) and to false otherwise.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "timeoutSeconds",
+ "in": "query",
+ "description": "Timeout for the list/watch call. This limits the duration of the call, regardless of any activity or inactivity.",
+ "schema": {
+ "type": "integer",
+ "uniqueItems": true
+ }
+ },
+ {
+ "name": "watch",
+ "in": "query",
+ "description": "Watch for changes to the described resources and return them as a stream of add, update, and remove notifications. Specify resourceVersion.",
+ "schema": {
+ "type": "boolean",
+ "uniqueItems": true
+ }
+ }
+ ]
+ }
+ },
+ "components": {
+ "schemas": {
+ "io.k8s.api.batch.v1.CronJob": {
+ "description": "CronJob represents the configuration of a single cron job.",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ }
+ ]
+ },
+ "spec": {
+ "description": "Specification of the desired behavior of a cron job, including the schedule. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobSpec"
+ }
+ ]
+ },
+ "status": {
+ "description": "Current status of a cron job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJobStatus"
+ }
+ ]
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "batch",
+ "kind": "CronJob",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.api.batch.v1.CronJobList": {
+ "description": "CronJobList is a collection of cron jobs.",
+ "type": "object",
+ "required": [
+ "items"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "description": "items is the list of CronJobs.",
+ "type": "array",
+ "items": {
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.CronJob"
+ }
+ ]
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ ]
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "batch",
+ "kind": "CronJobList",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.api.batch.v1.CronJobSpec": {
+ "description": "CronJobSpec describes how the job execution will look like and when it will actually run.",
+ "type": "object",
+ "required": [
+ "schedule",
+ "jobTemplate"
+ ],
+ "properties": {
+ "concurrencyPolicy": {
+ "description": "Specifies how to treat concurrent executions of a Job. Valid values are:\\n\\n- \\"Allow\\" (default): allows CronJobs to run concurrently; - \\"Forbid\\": forbids concurrent runs, skipping next run if previous run hasn't finished yet; - \\"Replace\\": cancels currently running job and replaces it with a new one\\n\\nPossible enum values:\\n - `\\"Allow\\"` allows CronJobs to run concurrently.\\n - `\\"Forbid\\"` forbids concurrent runs, skipping next run if previous hasn't finished yet.\\n - `\\"Replace\\"` cancels currently running job and replaces it with a new one.",
+ "type": "string",
+ "enum": [
+ "Allow",
+ "Forbid",
+ "Replace"
+ ]
+ },
+ "failedJobsHistoryLimit": {
+ "description": "The number of failed finished jobs to retain. Value must be non-negative integer. Defaults to 1.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "jobTemplate": {
+ "description": "Specifies the job that will be created when executing a CronJob.",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobTemplateSpec"
+ }
+ ]
+ },
+ "schedule": {
+ "description": "The schedule in Cron format, see https://en.wikipedia.org/wiki/Cron.",
+ "type": "string",
+ "default": ""
+ },
+ "startingDeadlineSeconds": {
+ "description": "Optional deadline in seconds for starting the job if it misses scheduled time for any reason. Missed jobs executions will be counted as failed ones.",
+ "type": "integer",
+ "format": "int64"
+ },
+ "successfulJobsHistoryLimit": {
+ "description": "The number of successful finished jobs to retain. Value must be non-negative integer. Defaults to 3.",
+ "type": "integer",
+ "format": "int32"
+ },
+ "suspend": {
+ "description": "This flag tells the controller to suspend subsequent executions, it does not apply to already started executions. Defaults to false.",
+ "type": "boolean"
+ },
+ "timeZone": {
+ "description": "The time zone name for the given schedule, see https://en.wikipedia.org/wiki/List_of_tz_database_time_zones. If not specified, this will default to the time zone of the kube-controller-manager process. The set of valid time zone names and the time zone offset is loaded from the system-wide time zone database by the API server during CronJob validation and the controller manager during execution. If no system-wide time zone database can be found a bundled version of the database is used instead. If the time zone name becomes invalid during the lifetime of a CronJob or due to a change in host configuration, the controller will stop creating new new Jobs and will create a system event with the reason UnknownTimeZone. More information can be found in https://kubernetes.io/docs/concepts/workloads/controllers/cron-jobs/#time-zones",
+ "type": "string"
+ }
+ }
+ },
+ "io.k8s.api.batch.v1.CronJobStatus": {
+ "description": "CronJobStatus represents the current state of a cron job.",
+ "type": "object",
+ "properties": {
+ "active": {
+ "description": "A list of pointers to currently running jobs.",
+ "type": "array",
+ "items": {
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.core.v1.ObjectReference"
+ }
+ ]
+ },
+ "x-kubernetes-list-type": "atomic"
+ },
+ "lastScheduleTime": {
+ "description": "Information when was the last time the job was successfully scheduled.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ ]
+ },
+ "lastSuccessfulTime": {
+ "description": "Information when was the last time the job successfully completed.",
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ ]
+ }
+ }
+ },
+ "io.k8s.api.batch.v1.Job": {
+ "description": "Job represents the configuration of a single job.",
+ "type": "object",
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ObjectMeta"
+ }
+ ]
+ },
+ "spec": {
+ "description": "Specification of the desired behavior of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobSpec"
+ }
+ ]
+ },
+ "status": {
+ "description": "Current status of a job. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.JobStatus"
+ }
+ ]
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "batch",
+ "kind": "Job",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.api.batch.v1.JobCondition": {
+ "description": "JobCondition describes current state of a job.",
+ "type": "object",
+ "required": [
+ "type",
+ "status"
+ ],
+ "properties": {
+ "lastProbeTime": {
+ "description": "Last time the condition was checked.",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ ]
+ },
+ "lastTransitionTime": {
+ "description": "Last time the condition transit from one status to another.",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.Time"
+ }
+ ]
+ },
+ "message": {
+ "description": "Human readable message indicating details about last transition.",
+ "type": "string"
+ },
+ "reason": {
+ "description": "(brief) reason for the condition's last transition.",
+ "type": "string"
+ },
+ "status": {
+ "description": "Status of the condition, one of True, False, Unknown.",
+ "type": "string",
+ "default": ""
+ },
+ "type": {
+ "description": "Type of job condition, Complete or Failed.",
+ "type": "string",
+ "default": ""
+ }
+ }
+ },
+ "io.k8s.api.batch.v1.JobList": {
+ "description": "JobList is a collection of jobs.",
+ "type": "object",
+ "required": [
+ "items"
+ ],
+ "properties": {
+ "apiVersion": {
+ "description": "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources",
+ "type": "string"
+ },
+ "items": {
+ "description": "items is the list of Jobs.",
+ "type": "array",
+ "items": {
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.api.batch.v1.Job"
+ }
+ ]
+ }
+ },
+ "kind": {
+ "description": "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
+ "type": "string"
+ },
+ "metadata": {
+ "description": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
+ "default": {},
+ "allOf": [
+ {
+ "$ref": "#/components/schemas/io.k8s.apimachinery.pkg.apis.meta.v1.ListMeta"
+ }
+ ]
+ }
+ },
+ "x-kubernetes-group-version-kind": [
+ {
+ "group": "batch",
+ "kind": "JobList",
+ "version": "v1"
+ }
+ ]
+ },
+ "io.k8s.api.batch.v1.JobSpec": {
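The generated spec hunk above documents the batch/v1 CronJob and Job endpoints, including the deprecated /watch/ paths whose descriptions point clients at the 'watch' parameter on list operations. As a minimal sketch of how this PR's context-aware polling pattern would be used against the Job API described by these schemas — the helper name waitForJobCompletion and the 2s/2m interval and timeout below are illustrative assumptions, not code from this diff:

// Sketch only: waitForJobCompletion is a hypothetical helper, not part of
// this PR. It polls a batch/v1 Job until it reports a Complete condition,
// using wait.PollUntilContextTimeout so that ctx cancellation ends the
// wait, which the deprecated wait.Poll could not do.
package example

import (
	"context"
	"fmt"
	"time"

	batchv1 "k8s.io/api/batch/v1"
	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes"
)

func waitForJobCompletion(ctx context.Context, cs kubernetes.Interface, namespace, name string) error {
	return wait.PollUntilContextTimeout(ctx, 2*time.Second, 2*time.Minute, false,
		func(ctx context.Context) (bool, error) {
			job, err := cs.BatchV1().Jobs(namespace).Get(ctx, name, metav1.GetOptions{})
			if err != nil {
				return false, err // a hard API error aborts the poll
			}
			for _, c := range job.Status.Conditions {
				if c.Type == batchv1.JobFailed && c.Status == v1.ConditionTrue {
					return false, fmt.Errorf("job %s/%s failed: %s", namespace, name, c.Message)
				}
				if c.Type == batchv1.JobComplete && c.Status == v1.ConditionTrue {
					return true, nil // condition met; stop polling
				}
			}
			return false, nil // not complete yet; poll again
		})
}

Threading ctx through both the condition function and the Get call is the behavioral point of the migration: cancelling the caller's context now stops the wait loop and any in-flight request, rather than only giving up when the fixed timeout expires.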