Compare commits

...

11 Commits

Author SHA1 Message Date
bad5b2ba1c Update README.md
All checks were successful
Lint / Run on Ubuntu (push) Successful in 19s
Tests / Run on Ubuntu (push) Successful in 27s
2025-05-05 07:25:24 +03:00
56b7b7a0b6 Merge pull request 'pod autoremoval feature added' (#4) from autofix into main
All checks were successful
Lint / Run on Ubuntu (push) Successful in 20s
Tests / Run on Ubuntu (push) Successful in 24s
Reviewed-on: #4
2025-05-04 23:10:58 +03:00
823a3a0a4d pod autoremoval feature added
All checks were successful
Lint / Run on Ubuntu (push) Successful in 22s
Tests / Run on Ubuntu (push) Successful in 28s
Lint / Run on Ubuntu (pull_request) Successful in 17s
Tests / Run on Ubuntu (pull_request) Successful in 25s
2025-05-05 00:57:24 +05:00
8192f888f7 Merge pull request 'updates' (#3) from updates into main
All checks were successful
Lint / Run on Ubuntu (push) Successful in 18s
Tests / Run on Ubuntu (push) Successful in 23s
Reviewed-on: #3
2025-05-04 18:57:53 +03:00
6e32cf842a fix
All checks were successful
Lint / Run on Ubuntu (push) Successful in 22s
Tests / Run on Ubuntu (push) Successful in 49s
Lint / Run on Ubuntu (pull_request) Successful in 20s
Tests / Run on Ubuntu (pull_request) Successful in 26s
2025-05-04 20:06:54 +05:00
8d581652a4 automatic image updates from public repos with selected tags feature added
All checks were successful
Lint / Run on Ubuntu (push) Successful in 1m30s
Tests / Run on Ubuntu (push) Successful in 1m38s
2025-05-04 19:24:31 +05:00
8029529f89 Merge pull request 'resources' (#2) from resources into main
All checks were successful
Lint / Run on Ubuntu (push) Successful in 29s
Tests / Run on Ubuntu (push) Successful in 1m28s
Reviewed-on: #2
2025-05-01 18:02:17 +03:00
2f05069e01 linter fixes
All checks were successful
Lint / Run on Ubuntu (push) Successful in 21s
Tests / Run on Ubuntu (push) Successful in 26s
Lint / Run on Ubuntu (pull_request) Successful in 20s
Tests / Run on Ubuntu (pull_request) Successful in 50s
2025-05-01 19:39:58 +05:00
23b333c07d alpha ver. of deployments resource controller
Some checks failed
Lint / Run on Ubuntu (push) Failing after 53s
Tests / Run on Ubuntu (push) Successful in 55s
2025-05-01 18:00:10 +05:00
fc5f580243 comments fixes
All checks were successful
Lint / Run on Ubuntu (push) Successful in 26s
Tests / Run on Ubuntu (push) Successful in 27s
2025-04-30 01:04:13 +05:00
010bd5fbe0 linter fixes
All checks were successful
Lint / Run on Ubuntu (push) Successful in 32s
Tests / Run on Ubuntu (push) Successful in 29s
2025-04-30 00:08:42 +05:00
13 changed files with 1016 additions and 76 deletions

View File

@@ -4,7 +4,7 @@ A Kubernetes operator that can perform various actions:
- Place taints on nodes based on their names.
- Place default resource requests/limits on deployments that lack them (deployments can also be excluded from these checks via an opt-out label).
- Upgrade deployment images based on a predefined list of periodically checked tags (such as :latest, :master, etc.).
- Attempt to fix CrashLoopBackOff by recreating linked resources (such as secrets, if they have not updated from a third-party provider (e.g., Vault)).
- Attempt to fix CrashLoopBackOff by deleting the affected pod (so it is rescheduled, possibly onto another node) for deployments listed in the CRD config.
Note: This is a college graduation project, so it may contain bugs and may not follow best practices.

View File

@@ -23,6 +23,66 @@ import (
// EDIT THIS FILE! THIS IS SCAFFOLDING FOR YOU TO OWN!
// NOTE: json tags are required. Any new fields you add must have json tags for the fields to be serialized.
// ResourceDefaults defines the default resource requests and limits.
type ResourceDefaults struct {
// Default CPU request (e.g., "100m"). Applied if a container has no CPU request.
// +optional
CPURequest string `json:"cpuRequest,omitempty"`
// Default Memory request (e.g., "128Mi"). Applied if a container has no Memory request.
// +optional
MemoryRequest string `json:"memoryRequest,omitempty"`
// Default CPU limit (e.g., "500m"). Applied if a container has no CPU limit.
// +optional
CPULimit string `json:"cpuLimit,omitempty"`
// Default Memory limit (e.g., "512Mi"). Applied if a container has no Memory limit.
// +optional
MemoryLimit string `json:"memoryLimit,omitempty"`
}
// ImageUpdatePolicy defines the policy for automatic image updates.
type ImageUpdatePolicy struct {
// Enabled toggles the image update feature.
// +optional
Enabled bool `json:"enabled,omitempty"`
// CheckInterval specifies how often to check for image updates (e.g., "5m", "1h", "15m").
// Minimum interval recommended: 5m to avoid rate limiting.
// +kubebuilder:validation:Pattern=`^([0-9]+(s|m|h))+$`
// +kubebuilder:default="1h"
// +optional
CheckInterval string `json:"checkInterval,omitempty"`
// MonitoredTags is a list of keywords found in image tags that trigger update checks.
// Example: ["latest", "master", "dev"]
// +optional
MonitoredTags []string `json:"monitoredTags,omitempty"`
// RestartAnnotation is the annotation key used to trigger deployment restarts.
// If empty, the built-in default "andy.vendetti.ru/restartedAt" is used.
// +optional
RestartAnnotation string `json:"restartAnnotation,omitempty"`
}
// CrashLoopPolicy defines the policy for handling pods in CrashLoopBackOff.
type CrashLoopPolicy struct {
// Enabled toggles the CrashLoopBackOff handling feature.
// +optional
Enabled bool `json:"enabled,omitempty"`
// MonitoredDeployments is a list of Deployments (in "namespace/name" format)
// whose pods should be monitored for CrashLoopBackOff.
// +optional
MonitoredDeployments []string `json:"monitoredDeployments,omitempty"`
// RestartThreshold is the number of container restarts after which
// a pod in CrashLoopBackOff will be deleted to attempt rescheduling.
// Minimum recommended value: 3 or 5.
// +kubebuilder:validation:Minimum=1
// +kubebuilder:default=5
// +optional
RestartThreshold int32 `json:"restartThreshold,omitempty"`
}
// NodeTainterConfigSpec defines the desired state of NodeTainterConfig.
type NodeTainterConfigSpec struct {
// INSERT ADDITIONAL SPEC FIELDS - desired state of cluster
@@ -38,7 +98,26 @@ type NodeTainterConfigSpec struct {
// +kubebuilder:validation:Optional
// +kubebuilder:validation:MinProperties=1
// +optional
LabelRules map[string]string `json:"labelRules,omitempty"`
// ResourceDefaults contains the default requests/limits to apply.
// If this section is omitted, resource defaulting is disabled.
// +optional
ResourceDefaults *ResourceDefaults `json:"resourceDefaults,omitempty"`
// OptOutLabelKey is the label key used to exempt Deployments from resource defaulting.
// If a Deployment has a label with this key (any value), defaults won't be applied.
// If empty or omitted, the opt-out mechanism is disabled.
// Example: "my-operator.example.com/skip-resource-defaults"
// +optional
OptOutLabelKey string `json:"optOutLabelKey,omitempty"`
// +optional
ImageUpdatePolicy *ImageUpdatePolicy `json:"imageUpdatePolicy,omitempty"`
// +optional
CrashLoopPolicy *CrashLoopPolicy `json:"crashLoopPolicy,omitempty"`
}
// NodeTainterConfigStatus defines the observed state of NodeTainterConfig.
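
The PodCrashReconciler that consumes CrashLoopPolicy is registered in cmd/main.go below, but its source is not part of this compare view. As a rough sketch only (helper names and their placement are assumptions, not the actual controller code), the CrashLoopPolicy fields above could be evaluated like this:

package controller

import (
	corev1 "k8s.io/api/core/v1"

	configv1alpha1 "git.vendetti.ru/andy/operator/api/v1alpha1"
)

// deploymentMonitored reports whether "namespace/name" is listed in the policy.
// Illustrative only; not the repository's actual helper.
func deploymentMonitored(policy *configv1alpha1.CrashLoopPolicy, namespace, name string) bool {
	for _, d := range policy.MonitoredDeployments {
		if d == namespace+"/"+name {
			return true
		}
	}
	return false
}

// shouldDeletePod is true when a container waits in CrashLoopBackOff and has
// restarted at least RestartThreshold times; the controller would then delete
// the pod so its ReplicaSet reschedules it, possibly onto another node.
func shouldDeletePod(pod *corev1.Pod, policy *configv1alpha1.CrashLoopPolicy) bool {
	if policy == nil || !policy.Enabled {
		return false
	}
	for _, cs := range pod.Status.ContainerStatuses {
		if cs.State.Waiting != nil &&
			cs.State.Waiting.Reason == "CrashLoopBackOff" &&
			cs.RestartCount >= policy.RestartThreshold {
			return true
		}
	}
	return false
}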

View File

@@ -25,6 +25,46 @@ import (
runtime "k8s.io/apimachinery/pkg/runtime"
)
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *CrashLoopPolicy) DeepCopyInto(out *CrashLoopPolicy) {
*out = *in
if in.MonitoredDeployments != nil {
in, out := &in.MonitoredDeployments, &out.MonitoredDeployments
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new CrashLoopPolicy.
func (in *CrashLoopPolicy) DeepCopy() *CrashLoopPolicy {
if in == nil {
return nil
}
out := new(CrashLoopPolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ImageUpdatePolicy) DeepCopyInto(out *ImageUpdatePolicy) {
*out = *in
if in.MonitoredTags != nil {
in, out := &in.MonitoredTags, &out.MonitoredTags
*out = make([]string, len(*in))
copy(*out, *in)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ImageUpdatePolicy.
func (in *ImageUpdatePolicy) DeepCopy() *ImageUpdatePolicy {
if in == nil {
return nil
}
out := new(ImageUpdatePolicy)
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *NodeTaintInfo) DeepCopyInto(out *NodeTaintInfo) {
*out = *in
@@ -114,6 +154,21 @@ func (in *NodeTainterConfigSpec) DeepCopyInto(out *NodeTainterConfigSpec) {
(*out)[key] = val
}
}
if in.ResourceDefaults != nil {
in, out := &in.ResourceDefaults, &out.ResourceDefaults
*out = new(ResourceDefaults)
**out = **in
}
if in.ImageUpdatePolicy != nil {
in, out := &in.ImageUpdatePolicy, &out.ImageUpdatePolicy
*out = new(ImageUpdatePolicy)
(*in).DeepCopyInto(*out)
}
if in.CrashLoopPolicy != nil {
in, out := &in.CrashLoopPolicy, &out.CrashLoopPolicy
*out = new(CrashLoopPolicy)
(*in).DeepCopyInto(*out)
}
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new NodeTainterConfigSpec.
@@ -154,3 +209,18 @@ func (in *NodeTainterConfigStatus) DeepCopy() *NodeTainterConfigStatus {
in.DeepCopyInto(out)
return out
}
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *ResourceDefaults) DeepCopyInto(out *ResourceDefaults) {
*out = *in
}
// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new ResourceDefaults.
func (in *ResourceDefaults) DeepCopy() *ResourceDefaults {
if in == nil {
return nil
}
out := new(ResourceDefaults)
in.DeepCopyInto(out)
return out
}

View File

@@ -27,6 +27,7 @@ import (
_ "k8s.io/client-go/plugin/pkg/client/auth"
"k8s.io/apimachinery/pkg/runtime"
// "k8s.io/client-go/kubernetes"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
clientgoscheme "k8s.io/client-go/kubernetes/scheme"
ctrl "sigs.k8s.io/controller-runtime"
@@ -205,10 +206,39 @@ func main() {
if err = (&controller.NodeTainterConfigReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("nodetainter-controller"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "NodeTainterConfig")
os.Exit(1)
}
if err = (&controller.DeploymentDefaultsReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("deploymentdefaults-controller"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "DeploymentDefaults")
os.Exit(1)
}
if err = (&controller.ImageUpdateReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("imageupdate-controller"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "ImageUpdate")
os.Exit(1)
}
if err = (&controller.PodCrashReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
Recorder: mgr.GetEventRecorderFor("podcrash-controller"),
}).SetupWithManager(mgr); err != nil {
setupLog.Error(err, "unable to create controller", "controller", "PodCrash")
os.Exit(1)
}
// +kubebuilder:scaffold:builder
if metricsCertWatcher != nil {

View File

@@ -41,11 +41,91 @@ spec:
spec:
description: NodeTainterConfigSpec defines the desired state of NodeTainterConfig.
properties:
crashLoopPolicy:
description: CrashLoopPolicy defines the policy for handling pods
in CrashLoopBackOff.
properties:
enabled:
description: Enabled toggles the CrashLoopBackOff handling feature.
type: boolean
monitoredDeployments:
description: |-
MonitoredDeployments is a list of Deployments (in "namespace/name" format)
whose pods should be monitored for CrashLoopBackOff.
items:
type: string
type: array
restartThreshold:
default: 5
description: |-
RestartThreshold is the number of container restarts after which
a pod in CrashLoopBackOff will be deleted to attempt rescheduling.
Minimum recommended value: 3 or 5.
format: int32
minimum: 1
type: integer
type: object
imageUpdatePolicy:
description: ImageUpdatePolicy defines the policy for automatic image
updates.
properties:
checkInterval:
default: 1h
description: |-
CheckInterval specifies how often to check for image updates (e.g., "5m", "1h", "15m").
Minimum interval recommended: 5m to avoid rate limiting.
pattern: ^([0-9]+(s|m|h))+$
type: string
enabled:
description: Enabled toggles the image update feature.
type: boolean
monitoredTags:
description: |-
MonitoredTags is a list of keywords found in image tags that trigger update checks.
Example: ["latest", "master", "dev"]
items:
type: string
type: array
restartAnnotation:
description: |-
RestartAnnotation is the annotation key used to trigger deployment restarts.
If empty, the built-in default "andy.vendetti.ru/restartedAt" is used.
type: string
type: object
labelRules:
additionalProperties:
type: string
minProperties: 1
type: object
optOutLabelKey:
description: |-
OptOutLabelKey is the label key used to exempt Deployments from resource defaulting.
If a Deployment has a label with this key (any value), defaults won't be applied.
If empty or omitted, the opt-out mechanism is disabled.
Example: "my-operator.example.com/skip-resource-defaults"
type: string
resourceDefaults:
description: |-
ResourceDefaults contains the default requests/limits to apply.
If this section is omitted, resource defaulting is disabled.
properties:
cpuLimit:
description: Default CPU limit (e.g., "500m"). Applied if a container
has no CPU limit.
type: string
cpuRequest:
description: Default CPU request (e.g., "100m"). Applied if a
container has no CPU request.
type: string
memoryLimit:
description: Default Memory limit (e.g., "512Mi"). Applied if
a container has no Memory limit.
type: string
memoryRequest:
description: Default Memory request (e.g., "128Mi"). Applied if
a container has no Memory request.
type: string
type: object
type: object
status:
description: NodeTainterConfigStatus defines the observed state of NodeTainterConfig.

View File

@@ -21,6 +21,33 @@ rules:
- patch
- update
- watch
- apiGroups:
- ""
resources:
- pods
verbs:
- delete
- get
- list
- watch
- apiGroups:
- apps
resources:
- deployments
verbs:
- get
- list
- patch
- update
- watch
- apiGroups:
- apps
resources:
- replicasets
verbs:
- get
- list
- watch
- apiGroups:
- operator.andy.vendetti.ru
resources:

View File

@@ -10,3 +10,20 @@ spec:
"andy.vendetti.ru/category=priority": "workload/priority=high:NoSchedule"
"andy.vendetti.ru/category=gpu": "nvidia.com/gpu=present:NoSchedule"
"andy.vendetti.ru/category=svc": "workload/type=service:NoSchedule"
resourceDefaults:
cpuRequest: "100m"
memoryRequest: "128Mi"
cpuLimit: "500m"
memoryLimit: "512Mi"
optOutLabelKey: "andy.vendetti.ru/skip-resource-defaults"
imageUpdatePolicy:
enabled: true
checkInterval: "5m"
monitoredTags: ["latest", "dev", "master"]
# restartAnnotation: "andy.vendetti.ru/restartedAt"
crashLoopPolicy:
enabled: true
restartThreshold: 5
monitoredDeployments:
- "default/hello-updater-test"
- "app-namespace/critical-app-deployment"

go.mod (38 changed lines)
View File

@@ -20,7 +20,11 @@ require (
github.com/blang/semver/v4 v4.0.0 // indirect
github.com/cenkalti/backoff/v4 v4.3.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/containerd/stargz-snapshotter/estargz v0.16.3 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
github.com/docker/cli v27.5.0+incompatible // indirect
github.com/docker/distribution v2.8.3+incompatible // indirect
github.com/docker/docker-credential-helpers v0.8.2 // indirect
github.com/emicklei/go-restful/v3 v3.11.0 // indirect
github.com/evanphx/json-patch/v5 v5.9.11 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
@@ -39,6 +43,7 @@ require (
github.com/google/cel-go v0.22.0 // indirect
github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect
github.com/google/go-containerregistry v0.20.3 // indirect
github.com/google/gofuzz v1.2.0 // indirect
github.com/google/pprof v0.0.0-20241029153458-d1b30febd7db // indirect
github.com/google/uuid v1.6.0 // indirect
@@ -46,43 +51,50 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.11 // indirect
github.com/mailru/easyjson v0.7.7 // indirect
github.com/mitchellh/go-homedir v1.1.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/prometheus/client_golang v1.19.1 // indirect
github.com/prometheus/client_model v0.6.1 // indirect
github.com/prometheus/common v0.55.0 // indirect
github.com/prometheus/procfs v0.15.1 // indirect
github.com/sirupsen/logrus v1.9.3 // indirect
github.com/spf13/cobra v1.8.1 // indirect
github.com/spf13/pflag v1.0.5 // indirect
github.com/stoewer/go-strcase v1.3.0 // indirect
github.com/vbatts/tar-split v0.11.6 // indirect
github.com/x448/float16 v0.8.4 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 // indirect
go.opentelemetry.io/otel v1.28.0 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
go.opentelemetry.io/otel v1.33.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 // indirect
go.opentelemetry.io/otel/metric v1.28.0 // indirect
go.opentelemetry.io/otel/sdk v1.28.0 // indirect
go.opentelemetry.io/otel/trace v1.28.0 // indirect
go.opentelemetry.io/otel/metric v1.33.0 // indirect
go.opentelemetry.io/otel/sdk v1.33.0 // indirect
go.opentelemetry.io/otel/trace v1.33.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
go.uber.org/multierr v1.11.0 // indirect
go.uber.org/zap v1.27.0 // indirect
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 // indirect
golang.org/x/net v0.30.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/sync v0.8.0 // indirect
golang.org/x/sys v0.26.0 // indirect
golang.org/x/term v0.25.0 // indirect
golang.org/x/text v0.19.0 // indirect
golang.org/x/net v0.34.0 // indirect
golang.org/x/oauth2 v0.25.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/sys v0.29.0 // indirect
golang.org/x/term v0.28.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.7.0 // indirect
golang.org/x/tools v0.26.0 // indirect
golang.org/x/tools v0.29.0 // indirect
gomodules.xyz/jsonpatch/v2 v2.4.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240826202546-f6391c0de4c7 // indirect
google.golang.org/grpc v1.65.0 // indirect
google.golang.org/protobuf v1.35.1 // indirect
google.golang.org/protobuf v1.36.3 // indirect
gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

go.sum (51 changed lines)
View File

@@ -12,12 +12,20 @@ github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK3
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/containerd/stargz-snapshotter/estargz v0.16.3 h1:7evrXtoh1mSbGj/pfRccTampEyKpjpOnS3CyiV1Ebr8=
github.com/containerd/stargz-snapshotter/estargz v0.16.3/go.mod h1:uyr4BfYfOj3G9WBVE8cOlQmXAbPN9VEQpBBeJIuOipU=
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/docker/cli v27.5.0+incompatible h1:aMphQkcGtpHixwwhAXJT1rrK/detk2JIvDaFkLctbGM=
github.com/docker/cli v27.5.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g=
github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
github.com/evanphx/json-patch v0.5.2 h1:xVCHIVMUu1wtM/VkR9jVZ45N3FhZfYMMYGorLCR8P3k=
@@ -60,6 +68,8 @@ github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYu
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-containerregistry v0.20.3 h1:oNx7IdTI936V8CQRveCjaxOiegWwvM7kqkbXTpyiovI=
github.com/google/go-containerregistry v0.20.3/go.mod h1:w00pIgBRDVUDFM6bq+Qx8lwNWK+cxgCuX1vd3PIBDNI=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@@ -77,6 +87,8 @@ github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnr
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -86,6 +98,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y=
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
@@ -97,6 +111,10 @@ github.com/onsi/ginkgo/v2 v2.22.0 h1:Yed107/8DjTr0lKCNt7Dn8yQ6ybuDRQoMGrNFKzMfHg
github.com/onsi/ginkgo/v2 v2.22.0/go.mod h1:7Du3c42kxCUegi0IImZ1wUQzMBVecgIHjR1C+NkhLQo=
github.com/onsi/gomega v1.36.1 h1:bJDPBO7ibjxcbHMgSCoo4Yj18UWbKDlLwX1x9sybDcw=
github.com/onsi/gomega v1.36.1/go.mod h1:PvZbdDc8J6XJEpDK4HCuRBm8a6Fzp9/DmhC9C7yFlog=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
@@ -113,6 +131,8 @@ github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoG
github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU5NdKM8=
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM=
github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y=
github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA=
@@ -123,29 +143,44 @@ github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/vbatts/tar-split v0.11.6 h1:4SjTW5+PU11n6fZenf2IPoV8/tz3AaYHMWjf23envGs=
github.com/vbatts/tar-split v0.11.6/go.mod h1:dqKNtesIOr2j2Qv3W/cHjnvk9I8+G7oAkFDFN6TCBEI=
github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0 h1:4K4tsIXefpVJtvA/8srF4V4y0akAoPHkIslgAkjixJA=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.53.0/go.mod h1:jjdQuTGVsXV4vSs+CJ2qYDeDPf9yIJV23qlIzBm73Vg=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.28.0 h1:/SqNcYk+idO0CxKEUOtKQClMK/MimZihKYMruSMViUo=
go.opentelemetry.io/otel v1.28.0/go.mod h1:q68ijF8Fc8CnMHKyzqL6akLO46ePnjkgfIMIjUIX9z4=
go.opentelemetry.io/otel v1.33.0 h1:/FerN9bax5LoK51X/sI0SVYrjSE0/yUL7DpxW4K3FWw=
go.opentelemetry.io/otel v1.33.0/go.mod h1:SUUkR6csvUQl+yjReHu5uM3EtVV7MBm5FHKRlNx4I8I=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0 h1:3Q/xZUyC1BBkualc9ROb4G8qkH90LXEIICcs5zv1OYY=
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.28.0/go.mod h1:s75jGIWA9OfCMzF0xr+ZgfrB5FEbbV7UuYo32ahUiFI=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0 h1:qFffATk0X+HD+f1Z8lswGiOQYKHRlzfmdJm0wEaVrFA=
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.27.0/go.mod h1:MOiCmryaYtc+V0Ei+Tx9o5S1ZjA7kzLucuVuyzBZloQ=
go.opentelemetry.io/otel/metric v1.28.0 h1:f0HGvSl1KRAU1DLgLGFjrwVyismPlnuU6JD6bOeuA5Q=
go.opentelemetry.io/otel/metric v1.28.0/go.mod h1:Fb1eVBFZmLVTMb6PPohq3TO9IIhUisDsbJoL/+uQW4s=
go.opentelemetry.io/otel/metric v1.33.0 h1:r+JOocAyeRVXD8lZpjdQjzMadVZp2M4WmQ+5WtEnklQ=
go.opentelemetry.io/otel/metric v1.33.0/go.mod h1:L9+Fyctbp6HFTddIxClbQkjtubW6O9QS3Ann/M82u6M=
go.opentelemetry.io/otel/sdk v1.28.0 h1:b9d7hIry8yZsgtbmM0DKyPWMMUMlK9NEKuIG4aBqWyE=
go.opentelemetry.io/otel/sdk v1.28.0/go.mod h1:oYj7ClPUA7Iw3m+r7GeEjz0qckQRJK2B8zjcZEfu7Pg=
go.opentelemetry.io/otel/sdk v1.33.0 h1:iax7M131HuAm9QkZotNHEfstof92xM+N8sr3uHXc2IM=
go.opentelemetry.io/otel/sdk v1.33.0/go.mod h1:A1Q5oi7/9XaMlIWzPSxLRWOI8nG3FnzHJNbiENQuihM=
go.opentelemetry.io/otel/trace v1.28.0 h1:GhQ9cUuQGmNDd5BTCP2dAvv75RdMxEfTmYejp+lkx9g=
go.opentelemetry.io/otel/trace v1.28.0/go.mod h1:jPyXzNPg6da9+38HEwElrQiHlVMTnVfM3/yv2OlIHaI=
go.opentelemetry.io/otel/trace v1.33.0 h1:cCJuF7LRjUFso9LPnEAHJDB2pqzp+hbO8eu1qqW2d/s=
go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37CbGV4fr1f2nBck=
go.opentelemetry.io/proto/otlp v1.3.1 h1:TrMUixzpM0yuc/znrFTP9MMRh8trP93mkCiDVeXrui0=
go.opentelemetry.io/proto/otlp v1.3.1/go.mod h1:0X1WI4de4ZsLrrJNLAQbFeLCm3T7yBkR0XqQ7niQU+8=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -167,24 +202,37 @@ golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.30.0 h1:AcW1SDZMkb8IpzCdQUaIq2sP4sZ4zw+55h6ynffypl4=
golang.org/x/net v0.30.0/go.mod h1:2wGyMJ5iFasEhkwi13ChkO/t1ECNC4X4eBKkVFyYFlU=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ=
golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo=
golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24=
golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM=
golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -193,6 +241,7 @@ golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roY
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.26.0 h1:v/60pFQmzmT9ExmjDv2gGIfi3OqfKoEP6I5+umXlbnQ=
golang.org/x/tools v0.26.0/go.mod h1:TPVVj70c7JJ3WCazhD8OdXcZg/og+b9+tH/KxylGwH0=
golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -207,6 +256,8 @@ google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.3 h1:82DV7MYdb8anAVi3qge1wSnMDrnKK7ebr+I0hHRN1BU=
google.golang.org/protobuf v1.36.3/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=

View File

@@ -0,0 +1,218 @@
// internal/controller/deploymentdefaults_controller.go
package controller
import (
"context"
"fmt"
"strings"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/api/resource"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/handler"
"sigs.k8s.io/controller-runtime/pkg/log"
"sigs.k8s.io/controller-runtime/pkg/reconcile"
configv1alpha1 "git.vendetti.ru/andy/operator/api/v1alpha1"
)
// DeploymentDefaultsReconciler reconciles Deployment objects to apply default resources.
type DeploymentDefaultsReconciler struct {
client.Client
Scheme *runtime.Scheme
Recorder record.EventRecorder
}
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups=operator.andy.vendetti.ru,resources=nodetainterconfigs,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
func (r *DeploymentDefaultsReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := log.FromContext(ctx).WithValues("deployment", req.NamespacedName)
var deployment appsv1.Deployment
if err := r.Get(ctx, req.NamespacedName, &deployment); err != nil {
if errors.IsNotFound(err) {
log.Info("Deployment not found. Ignoring.")
return ctrl.Result{}, nil
}
log.Error(err, "Failed to get Deployment")
return ctrl.Result{}, err // Requeue on error
}
var config configv1alpha1.NodeTainterConfig
configKey := types.NamespacedName{Name: GlobalTaintConfigName}
if err := r.Get(ctx, configKey, &config); err != nil {
if errors.IsNotFound(err) {
log.Info("Global NodeTainterConfig not found, skipping resource defaulting", "configName", GlobalTaintConfigName)
return ctrl.Result{}, nil
}
log.Error(err, "Failed to get NodeTainterConfig for defaults", "configName", GlobalTaintConfigName)
r.Recorder.Eventf(&deployment, corev1.EventTypeWarning, "ConfigError", "Failed to get config %s: %v", GlobalTaintConfigName, err)
return ctrl.Result{}, err
}
if config.Spec.ResourceDefaults == nil {
log.V(1).Info("Resource defaulting is disabled in NodeTainterConfig.")
return ctrl.Result{}, nil
}
optOutKey := strings.TrimSpace(config.Spec.OptOutLabelKey)
if optOutKey != "" {
labels := deployment.GetLabels()
if _, exists := labels[optOutKey]; exists {
log.Info("Deployment has opt-out label, skipping resource defaulting", "labelKey", optOutKey)
r.Recorder.Eventf(&deployment, corev1.EventTypeNormal, "OptedOut", "Skipping resource defaulting due to label %s", optOutKey)
return ctrl.Result{}, nil
}
}
defaults := config.Spec.ResourceDefaults
defaultCPUReq, errCPUReq := parseQuantity(defaults.CPURequest)
defaultMemReq, errMemReq := parseQuantity(defaults.MemoryRequest)
defaultCPULim, errCPULim := parseQuantity(defaults.CPULimit)
defaultMemLim, errMemLim := parseQuantity(defaults.MemoryLimit)
var parseErrors []string
if errCPUReq != nil {
parseErrors = append(parseErrors, fmt.Sprintf("CPURequest: %v", errCPUReq))
}
if errMemReq != nil {
parseErrors = append(parseErrors, fmt.Sprintf("MemoryRequest: %v", errMemReq))
}
if errCPULim != nil {
parseErrors = append(parseErrors, fmt.Sprintf("CPULimit: %v", errCPULim))
}
if errMemLim != nil {
parseErrors = append(parseErrors, fmt.Sprintf("MemoryLimit: %v", errMemLim))
}
if len(parseErrors) > 0 {
parsingError := fmt.Errorf("invalid resource quantity format in NodeTainterConfig %s: %s", config.Name, strings.Join(parseErrors, "; "))
log.Error(parsingError, "Default resource parsing failed", "configName", config.Name, "parsingErrors", parseErrors)
r.Recorder.Eventf(&deployment, corev1.EventTypeWarning, "ConfigError", parsingError.Error())
return ctrl.Result{}, nil
}
deploymentCopy := deployment.DeepCopy()
mutated := false
for i, container := range deploymentCopy.Spec.Template.Spec.Containers {
containerName := container.Name
log := log.WithValues("container", containerName)
if deploymentCopy.Spec.Template.Spec.Containers[i].Resources.Requests == nil {
deploymentCopy.Spec.Template.Spec.Containers[i].Resources.Requests = corev1.ResourceList{}
}
if deploymentCopy.Spec.Template.Spec.Containers[i].Resources.Limits == nil {
deploymentCopy.Spec.Template.Spec.Containers[i].Resources.Limits = corev1.ResourceList{}
}
requests := deploymentCopy.Spec.Template.Spec.Containers[i].Resources.Requests
limits := deploymentCopy.Spec.Template.Spec.Containers[i].Resources.Limits
if _, exists := requests[corev1.ResourceCPU]; !exists && defaultCPUReq != nil {
requests[corev1.ResourceCPU] = *defaultCPUReq
log.V(1).Info("Applied default CPU request", "value", defaultCPUReq.String())
mutated = true
}
if _, exists := requests[corev1.ResourceMemory]; !exists && defaultMemReq != nil {
requests[corev1.ResourceMemory] = *defaultMemReq
log.V(1).Info("Applied default Memory request", "value", defaultMemReq.String())
mutated = true
}
if _, exists := limits[corev1.ResourceCPU]; !exists && defaultCPULim != nil {
limits[corev1.ResourceCPU] = *defaultCPULim
log.V(1).Info("Applied default CPU limit", "value", defaultCPULim.String())
mutated = true
}
if _, exists := limits[corev1.ResourceMemory]; !exists && defaultMemLim != nil {
limits[corev1.ResourceMemory] = *defaultMemLim
log.V(1).Info("Applied default Memory limit", "value", defaultMemLim.String())
mutated = true
}
}
if mutated {
log.Info("Applying default resource requests/limits to Deployment")
if err := r.Patch(ctx, deploymentCopy, client.MergeFrom(&deployment)); err != nil {
log.Error(err, "Failed to patch Deployment with default resources")
r.Recorder.Eventf(&deployment, corev1.EventTypeWarning, "UpdateFailed", "Failed to apply default resources: %v", err)
return ctrl.Result{}, err
}
log.Info("Successfully applied default resources")
r.Recorder.Eventf(&deployment, corev1.EventTypeNormal, "DefaultsApplied", "Default resource requests/limits applied")
} else {
log.V(1).Info("Deployment already has necessary resource requests/limits or no defaults configured.")
}
return ctrl.Result{}, nil
}
func parseQuantity(s string) (*resource.Quantity, error) {
s = strings.TrimSpace(s)
if s == "" {
return nil, nil
}
q, err := resource.ParseQuantity(s)
if err != nil {
return nil, fmt.Errorf("invalid quantity format '%s': %w", s, err)
}
return &q, nil
}
// Map function for NodeTainterConfig: Trigger reconcile for ALL Deployments when the specific config changes
func (r *DeploymentDefaultsReconciler) mapConfigToDeployments(ctx context.Context, obj client.Object) []reconcile.Request {
config, ok := obj.(*configv1alpha1.NodeTainterConfig)
log := log.FromContext(ctx)
if !ok || config.Name != GlobalTaintConfigName {
return nil
}
log.Info("Global NodeTainterConfig changed, queuing reconciliation for all deployments potentially affected by resource defaults", "configName", config.Name)
var deploymentList appsv1.DeploymentList
if err := r.List(ctx, &deploymentList, client.InNamespace("")); err != nil {
log.Error(err, "Failed to list deployments for config change")
return nil
}
requests := make([]reconcile.Request, 0, len(deploymentList.Items))
optOutKey := strings.TrimSpace(config.Spec.OptOutLabelKey)
for _, deployment := range deploymentList.Items {
if optOutKey != "" {
labels := deployment.GetLabels()
if _, exists := labels[optOutKey]; exists {
continue
}
}
requests = append(requests, reconcile.Request{
NamespacedName: types.NamespacedName{
Name: deployment.Name,
Namespace: deployment.Namespace,
},
})
}
log.Info("Queued deployment reconcile requests", "count", len(requests))
return requests
}
func (r *DeploymentDefaultsReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.Recorder = mgr.GetEventRecorderFor("deploymentdefaults-controller")
return ctrl.NewControllerManagedBy(mgr).
Named("deploymentdefaults").
For(&appsv1.Deployment{}).
Watches(
&configv1alpha1.NodeTainterConfig{},
handler.EnqueueRequestsFromMapFunc(r.mapConfigToDeployments),
).
Complete(r)
}
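
To make the defaulting decision above easier to see in isolation, here is a small self-contained sketch (not part of the repository) that applies the same fill-only-missing logic to an in-memory ResourceList, using the request values from the sample config:

package main

import (
	"fmt"

	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Defaults as configured in the sample NodeTainterConfig.
	defCPUReq := resource.MustParse("100m")
	defMemReq := resource.MustParse("128Mi")

	// A container that already declares a CPU request but no memory request.
	requests := corev1.ResourceList{
		corev1.ResourceCPU: resource.MustParse("250m"),
	}

	// The same per-container decision the reconciler makes: fill only missing keys.
	if _, ok := requests[corev1.ResourceCPU]; !ok {
		requests[corev1.ResourceCPU] = defCPUReq
	}
	if _, ok := requests[corev1.ResourceMemory]; !ok {
		requests[corev1.ResourceMemory] = defMemReq
	}

	// Prints "cpu: 250m memory: 128Mi": the existing CPU request is preserved,
	// only the missing memory request is defaulted.
	fmt.Println("cpu:", requests.Cpu().String(), "memory:", requests.Memory().String())
}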

View File

@@ -0,0 +1,221 @@
// internal/controller/imageupdate_controller.go
package controller
import (
"context"
"fmt"
"strings"
"time"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
configv1alpha1 "git.vendetti.ru/andy/operator/api/v1alpha1"
"github.com/google/go-containerregistry/pkg/crane"
"github.com/google/go-containerregistry/pkg/name"
)
const (
DefaultRestartAnnotation = "andy.vendetti.ru/restartedAt"
)
// ImageUpdateReconciler reconciles Deployment objects to check for image updates.
type ImageUpdateReconciler struct {
client.Client
Scheme *runtime.Scheme
Recorder record.EventRecorder
}
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch;update;patch
// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch
// +kubebuilder:rbac:groups=operator.andy.vendetti.ru,resources=nodetainterconfigs,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
func (r *ImageUpdateReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := log.FromContext(ctx).WithValues("deployment", req.NamespacedName)
var config configv1alpha1.NodeTainterConfig
configKey := types.NamespacedName{Name: GlobalTaintConfigName}
if err := r.Get(ctx, configKey, &config); err != nil {
if !errors.IsNotFound(err) {
log.Error(err, "Failed to get NodeTainterConfig for image updates", "configName", GlobalTaintConfigName)
return ctrl.Result{}, err
}
log.V(1).Info("Global NodeTainterConfig not found, image update checks skipped.", "configName", GlobalTaintConfigName)
return ctrl.Result{}, nil
}
if config.Spec.ImageUpdatePolicy == nil || !config.Spec.ImageUpdatePolicy.Enabled {
log.V(1).Info("Image update policy is disabled in NodeTainterConfig.")
return ctrl.Result{}, nil
}
policy := config.Spec.ImageUpdatePolicy
if len(policy.MonitoredTags) == 0 {
log.V(1).Info("No monitored tags configured in ImageUpdatePolicy.")
return ctrl.Result{}, nil
}
var deployment appsv1.Deployment
if err := r.Get(ctx, req.NamespacedName, &deployment); err != nil {
if errors.IsNotFound(err) {
log.Info("Deployment not found. Ignoring.")
return ctrl.Result{}, nil
}
log.Error(err, "Failed to get Deployment")
return ctrl.Result{}, err
}
log.V(1).Info("Checking deployment for image updates")
needsRestart := false
restartAnnotation := policy.RestartAnnotation
if restartAnnotation == "" {
restartAnnotation = DefaultRestartAnnotation
}
podList, err := r.findPodsForDeployment(ctx, &deployment)
if err != nil {
log.Error(err, "Failed to list pods for deployment")
return ctrl.Result{RequeueAfter: 1 * time.Minute}, nil
}
for _, container := range deployment.Spec.Template.Spec.Containers {
containerLog := log.WithValues("container", container.Name, "image", container.Image)
imageName, imageTag, isMonitored := r.parseImageAndCheckTag(container.Image, policy.MonitoredTags)
if !isMonitored {
containerLog.V(1).Info("Image tag is not monitored, skipping.")
continue
}
currentDigest, err := r.findCurrentImageDigest(container.Name, podList)
if err != nil {
containerLog.Error(err, "Could not determine current image digest from running pods")
continue
}
if currentDigest == "" {
containerLog.V(1).Info("No running pods found with imageID for this container, skipping check.")
continue
}
containerLog = containerLog.WithValues("currentDigest", currentDigest)
latestDigest, err := crane.Digest(imageName + ":" + imageTag)
if err != nil {
containerLog.Error(err, "Failed to get latest digest from registry", "image", imageName+":"+imageTag)
r.Recorder.Eventf(&deployment, corev1.EventTypeWarning, "RegistryError", "Failed to fetch digest for %s:%s: %v", imageName, imageTag, err)
continue
}
containerLog = containerLog.WithValues("latestDigest", latestDigest)
if currentDigest != latestDigest {
containerLog.Info("Image update detected!", "image", imageName+":"+imageTag)
r.Recorder.Eventf(&deployment, corev1.EventTypeNormal, "UpdateAvailable", "New digest %s detected for image %s:%s (current: %s)", latestDigest, imageName, imageTag, currentDigest)
needsRestart = true
break
} else {
containerLog.V(1).Info("Image is up-to-date.")
}
}
if needsRestart {
deploymentCopy := deployment.DeepCopy()
if deploymentCopy.Spec.Template.Annotations == nil {
deploymentCopy.Spec.Template.Annotations = make(map[string]string)
}
restartValue := time.Now().Format(time.RFC3339)
deploymentCopy.Spec.Template.Annotations[restartAnnotation] = restartValue
log.Info("Triggering deployment restart due to image update", "annotationKey", restartAnnotation, "annotationValue", restartValue)
if err := r.Patch(ctx, deploymentCopy, client.MergeFrom(&deployment)); err != nil {
log.Error(err, "Failed to patch Deployment to trigger restart")
r.Recorder.Eventf(&deployment, corev1.EventTypeWarning, "UpdateFailed", "Failed to trigger restart: %v", err)
return ctrl.Result{}, err
}
log.Info("Deployment patched successfully to trigger restart.")
r.Recorder.Eventf(&deployment, corev1.EventTypeNormal, "RestartTriggered", "Triggered restart due to updated image")
}
checkInterval, err := time.ParseDuration(policy.CheckInterval)
if err != nil {
log.Error(err, "Failed to parse CheckInterval from config, using default 1h", "configuredInterval", policy.CheckInterval)
checkInterval = time.Hour // Fallback
}
log.V(1).Info("Requeuing deployment for next check", "after", checkInterval.String())
return ctrl.Result{RequeueAfter: checkInterval}, nil
}
func (r *ImageUpdateReconciler) parseImageAndCheckTag(image string, monitoredTags []string) (imgName, imgTag string, monitored bool) {
ref, err := name.ParseReference(image, name.WeakValidation)
if err != nil {
return "", "", false
}
imgName = ref.Context().Name()
imgTag = ref.Identifier()
if strings.HasPrefix(imgTag, "sha256:") {
return imgName, imgTag, false
}
for _, monitoredKeyword := range monitoredTags {
if strings.Contains(imgTag, monitoredKeyword) {
return imgName, imgTag, true
}
}
return imgName, imgTag, false
}
func (r *ImageUpdateReconciler) findPodsForDeployment(ctx context.Context, deployment *appsv1.Deployment) (*corev1.PodList, error) {
selector, err := metav1.LabelSelectorAsSelector(deployment.Spec.Selector)
if err != nil {
return nil, fmt.Errorf("failed to convert deployment selector: %w", err)
}
podList := &corev1.PodList{}
err = r.List(ctx, podList, client.InNamespace(deployment.Namespace), client.MatchingLabelsSelector{Selector: selector})
if err != nil {
return nil, fmt.Errorf("failed to list pods: %w", err)
}
return podList, nil
}
func (r *ImageUpdateReconciler) findCurrentImageDigest(containerName string, podList *corev1.PodList) (string, error) {
for _, pod := range podList.Items {
if pod.Status.Phase != corev1.PodRunning && pod.Status.Phase != corev1.PodPending {
continue
}
for _, cs := range pod.Status.ContainerStatuses {
if cs.Name == containerName && cs.ImageID != "" {
parts := strings.SplitN(cs.ImageID, "@", 2)
if len(parts) == 2 && strings.HasPrefix(parts[1], "sha256:") {
return parts[1], nil
}
if strings.HasPrefix(cs.ImageID, "sha256:") {
return cs.ImageID, nil
}
return "", fmt.Errorf("unrecognized imageID format in pod %s: %s", pod.Name, cs.ImageID)
}
}
}
return "", nil
}
func (r *ImageUpdateReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.Recorder = mgr.GetEventRecorderFor("imageupdate-controller")
return ctrl.NewControllerManagedBy(mgr).
Named("imageupdate").
For(&appsv1.Deployment{}).
Complete(r)
}
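
One subtlety of parseImageAndCheckTag above: MonitoredTags matching uses strings.Contains on the tag rather than exact equality, so the keyword "master" also matches a tag such as "master-nightly". A standalone illustration of that behaviour with the same go-containerregistry name package (the registry.example.com reference is made up):

package main

import (
	"fmt"
	"strings"

	"github.com/google/go-containerregistry/pkg/name"
)

func main() {
	// Keywords as in the sample config.
	monitored := []string{"latest", "master", "dev"}

	for _, image := range []string{
		"nginx:latest", // monitored: tag contains "latest"
		"nginx:1.25.3", // not monitored
		"registry.example.com/app:master-nightly", // monitored: substring match
	} {
		ref, err := name.ParseReference(image, name.WeakValidation)
		if err != nil {
			fmt.Println(image, "parse error:", err)
			continue
		}
		tag := ref.Identifier()
		hit := false
		for _, kw := range monitored {
			if strings.Contains(tag, kw) {
				hit = true
				break
			}
		}
		fmt.Printf("%s -> repo=%s tag=%s monitored=%v\n", image, ref.Context().Name(), tag, hit)
	}
}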

View File

@@ -126,11 +126,11 @@ func (r *NodeTainterConfigReconciler) Reconcile(ctx context.Context, req ctrl.Re
// Converting map[string]string into map[string]corev1.Taint for convenience
parsedRules, parseErrs := parseLabelRulesFromSpec(specLabelRules)
if len(parseErrs) > 0 {
errMsg := fmt.Sprintf("Invalid rules found in NodeTainterConfig %s: %v", config.Name, parseErrs)
log.Error(fmt.Errorf(errMsg), "Rule parsing failed")
r.Recorder.Eventf(&config, corev1.EventTypeWarning, "InvalidConfig", errMsg)
_ = r.updateCRDStatus(ctx, &config, metav1.ConditionFalse, ConditionReasonConfigParsingError, errMsg)
_ = r.updateNodeTaintStatus(ctx, &node, nil, errMsg)
parsingError := fmt.Errorf("invalid rules found in NodeTainterConfig %s: %v", config.Name, parseErrs)
log.Error(parsingError, "Rule parsing failed", "configName", config.Name, "parsingErrors", parseErrs)
r.Recorder.Eventf(&config, corev1.EventTypeWarning, "InvalidConfig", parsingError.Error())
_ = r.updateCRDStatus(ctx, &config, metav1.ConditionFalse, ConditionReasonConfigParsingError, parsingError.Error())
_ = r.updateNodeTaintStatus(ctx, &node, nil, parsingError.Error())
return ctrl.Result{}, nil
}
@@ -221,23 +221,23 @@ func (r *NodeTainterConfigReconciler) SetupWithManager(mgr ctrl.Manager) error {
// --- UTILS FUNCTIONS ---
// TaintToString converts a taint to a string for status/logs
// Converts Taint to string for status/logs
func TaintToString(taint *corev1.Taint) string {
return fmt.Sprintf("%s=%s:%s", taint.Key, taint.Value, taint.Effect)
}
// TaintsToStrings converts a slice of taints to a slice of strings
// Converts Taints slice to string slice
func TaintsToStrings(taints []corev1.Taint) []string {
res := make([]string, len(taints))
for i, t := range taints {
res[i] = TaintToString(&t)
}
sort.Strings(res) // Sort for status consistency
sort.Strings(res)
return res
}
// parseLabelRulesFromSpec parses rules from the CRD Spec
// Returns map["labelKey=labelValue"]corev1.Taint and errors
// Parses rules from CRD Spec
// returns map["labelKey=labelValue"]corev1.Taint and errors
func parseLabelRulesFromSpec(specLabelRules map[string]string) (map[string]corev1.Taint, []error) {
parsed := make(map[string]corev1.Taint)
var errs []error
@@ -251,25 +251,24 @@ func parseLabelRulesFromSpec(specLabelRules map[string]string) (map[string]corev
continue
}
// Parse the "key=value" selector
partsSelector := strings.SplitN(ruleSelector, "=", 2)
if len(partsSelector) != 2 { // There must be an '=' sign
if len(partsSelector) != 2 {
errs = append(errs, fmt.Errorf("invalid rule selector format '%s': missing '='", ruleSelector))
continue
}
labelKey := strings.TrimSpace(partsSelector[0])
labelValue := strings.TrimSpace(partsSelector[1]) // May be empty!
labelValue := strings.TrimSpace(partsSelector[1])
if labelKey == "" {
errs = append(errs, fmt.Errorf("invalid rule selector format '%s': empty label key", ruleSelector))
continue
}
// Validate the label key
if msgs := apivalidation.IsQualifiedName(labelKey); len(msgs) > 0 {
errs = append(errs, fmt.Errorf("invalid label key in selector '%s': %v", ruleSelector, msgs))
continue
}
// Validate the label value (if not empty)
if labelValue != "" {
if msgs := apivalidation.IsValidLabelValue(labelValue); len(msgs) > 0 {
errs = append(errs, fmt.Errorf("invalid label value in selector '%s': %v", ruleSelector, msgs))
@@ -277,7 +276,6 @@ func parseLabelRulesFromSpec(specLabelRules map[string]string) (map[string]corev
}
}
// Parse the taint string "key=value:Effect"
partsEffect := strings.SplitN(taintString, ":", 2)
if len(partsEffect) != 2 || partsEffect[1] == "" {
errs = append(errs, fmt.Errorf("invalid taint format for rule '%s': '%s' (missing effect)", ruleSelector, taintString))
@@ -311,35 +309,31 @@ func parseLabelRulesFromSpec(specLabelRules map[string]string) (map[string]corev
continue
}
// All checks passed
taint := corev1.Taint{Key: taintKey, Value: taintValue, Effect: effect}
parsed[ruleSelector] = taint // Key = "labelKey=labelValue"
parsed[ruleSelector] = taint // Key = "labelKey=labelValue"
}
return parsed, errs
}
// calculateDesiredTaints determines taints based on node labels and rules
// Defines Taints depending on node labels and rules
func calculateDesiredTaints(nodeLabels map[string]string, parsedLabelRules map[string]corev1.Taint) []corev1.Taint {
desired := []corev1.Taint{}
foundTaints := make(map[string]bool) // For Key:Effect uniqueness
foundTaints := make(map[string]bool)
if nodeLabels == nil {
nodeLabels = make(map[string]string) // Safety guard
nodeLabels = make(map[string]string)
}
for ruleSelector, taint := range parsedLabelRules {
parts := strings.SplitN(ruleSelector, "=", 2)
if len(parts) != 2 {
continue
} // Should already have been validated
}
ruleKey := parts[0]
ruleValue := parts[1] // May be empty
ruleValue := parts[1]
actualValue, exists := nodeLabels[ruleKey]
// Matching logic:
// 1. The label key must exist on the node.
// 2. The node's label value must EXACTLY match the rule's value (including an empty string).
if exists && actualValue == ruleValue {
taintKeyEffect := fmt.Sprintf("%s:%s", taint.Key, taint.Effect)
if !foundTaints[taintKeyEffect] {
@@ -351,22 +345,19 @@ func calculateDesiredTaints(nodeLabels map[string]string, parsedLabelRules map[s
return desired
}
// TaintKeyEffect builds a unique string for a taint (Key:Effect)
// Creates unique string for Taint (Key:Effect)
func TaintKeyEffect(taint *corev1.Taint) string {
return fmt.Sprintf("%s:%s", taint.Key, taint.Effect)
}
// mergeAndCheckTaints compares the current and desired operator-managed taints.
// parsedLabelRules: map["labelKey=labelValue"]corev1.Taint - contains ALL valid rules from the config.
// Compares current and desired controlled Taints
func mergeAndCheckTaints(currentTaints []corev1.Taint, desiredTaints []corev1.Taint, parsedLabelRules map[string]corev1.Taint) (bool, []corev1.Taint) {
// 1. Determine which taint types (Key:Effect) we manage across all rules
managedTaintTypes := sets.NewString()
for _, ruleTaint := range parsedLabelRules { // Iterate over the values (Taint objects)
for _, ruleTaint := range parsedLabelRules {
managedTaintTypes.Insert(TaintKeyEffect(&ruleTaint))
}
// 2. Split the current taints into managed and unmanaged
currentManagedTaints := make(map[string]corev1.Taint) // key:Effect -> Taint
currentManagedTaints := make(map[string]corev1.Taint)
unmanagedTaints := []corev1.Taint{}
for _, taint := range currentTaints {
ke := TaintKeyEffect(&taint)
@@ -377,31 +368,27 @@ func mergeAndCheckTaints(currentTaints []corev1.Taint, desiredTaints []corev1.Ta
}
}
// 3. Build a map of the desired taints for fast lookup
desiredTaintsMap := make(map[string]corev1.Taint) // key:Effect -> Taint
for _, taint := range desiredTaints {
// Double-check that the desired taint is actually defined by the rules (just in case)
ke := TaintKeyEffect(&taint)
if managedTaintTypes.Has(ke) {
desiredTaintsMap[ke] = taint
}
}
// 4. Compare the managed current taints against the desired ones
needsUpdate := false
if len(currentManagedTaints) != len(desiredTaintsMap) {
needsUpdate = true
} else {
for ke, desiredTaint := range desiredTaintsMap {
currentTaint, exists := currentManagedTaints[ke]
if !exists || currentTaint.Value != desiredTaint.Value { // Сравниваем и значения
if !exists || currentTaint.Value != desiredTaint.Value {
needsUpdate = true
break
}
}
}
// 5. Assemble the new taint list if an update is needed
if needsUpdate {
newTaints := make([]corev1.Taint, 0, len(unmanagedTaints)+len(desiredTaintsMap))
newTaints = append(newTaints, unmanagedTaints...)
@@ -409,7 +396,7 @@ func mergeAndCheckTaints(currentTaints []corev1.Taint, desiredTaints []corev1.Ta
for ke := range desiredTaintsMap {
desiredKeys = append(desiredKeys, ke)
}
sort.Strings(desiredKeys) // Сортируем для консистентности
sort.Strings(desiredKeys)
for _, ke := range desiredKeys {
newTaints = append(newTaints, desiredTaintsMap[ke])
}
@@ -419,16 +406,14 @@ func mergeAndCheckTaints(currentTaints []corev1.Taint, desiredTaints []corev1.Ta
return false, currentTaints
}
// updateCRDStatus обновляет статус ресурса NodeTainterConfig
// TODO: Вызывать эту функцию при изменении CRD или при старте/ошибках контроллера.
// Updates NodeTainterConfig status
// TODO: Call this function on CRD updates or controller start/errors
func (r *NodeTainterConfigReconciler) updateCRDStatus(ctx context.Context, config *configv1alpha1.NodeTainterConfig, status metav1.ConditionStatus, reason, message string) error {
log := log.FromContext(ctx).WithValues("config", config.Name)
configCopy := config.DeepCopy()
// Set observedGeneration
configCopy.Status.ObservedGeneration = config.Generation
// Update the Condition
newCondition := metav1.Condition{
Type: ConditionTypeReady,
Status: status,
@@ -436,16 +421,12 @@ func (r *NodeTainterConfigReconciler) updateCRDStatus(ctx context.Context, confi
Message: message,
LastTransitionTime: metav1.Now(),
}
// TODO: Использовать 'meta.SetStatusCondition' из 'k8s.io/apimachinery/pkg/api/meta' для правильного обновления conditions
// Примерно так:
// meta.SetStatusCondition(&configCopy.Status.Conditions, newCondition)
// Пока просто заменяем для простоты
// TODO: Use 'meta.SetStatusCondition' from 'k8s.io/apimachinery/pkg/api/meta' for correct conditions updates
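// A minimal sketch of that call (assuming the package is imported, e.g. as
// apimeta "k8s.io/apimachinery/pkg/api/meta"; the alias is illustrative only):
//   apimeta.SetStatusCondition(&configCopy.Status.Conditions, newCondition)
// SetStatusCondition merges by Condition.Type and only bumps LastTransitionTime when the
// status value actually changes; the line below simply replaces the whole slice instead.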
configCopy.Status.Conditions = []metav1.Condition{newCondition}
// TODO: Обновить NodeTaintStatus на основе данных со всех нод (может быть сложно и затратно)
// TODO: Update NodeTaintStatus based on data from all nodes
// configCopy.Status.NodeTaintStatus = ...
// Use Patch to update the status
if err := r.Status().Patch(ctx, configCopy, client.MergeFrom(config)); err != nil {
log.Error(err, "Failed to patch NodeTainterConfig status")
return err
@@ -454,26 +435,19 @@ func (r *NodeTainterConfigReconciler) updateCRDStatus(ctx context.Context, confi
return nil
}
// updateNodeTaintStatus обновляет информацию о тейнтах для конкретной ноды в статусе CRD
// TODO: Эта функция в текущем виде будет вызывать конфликты, т.к. каждый Reconcile ноды
// будет пытаться перезаписать весь Status.NodeTaintStatus.
// Правильный подход: читать текущий статус CRD, обновлять только запись для текущей ноды, патчить.
// Это усложняет код, пока оставим так для демонстрации, но ЭТО НУЖНО ИСПРАВИТЬ для production.
// Updates the taint info for the given node in the CRD status
func (r *NodeTainterConfigReconciler) updateNodeTaintStatus(ctx context.Context, node *corev1.Node, appliedTaints []corev1.Taint, errorMsg string) error {
log := log.FromContext(ctx).WithValues("node", node.Name)
var config configv1alpha1.NodeTainterConfig
configKey := types.NamespacedName{Name: GlobalTaintConfigName}
// Fetch the CRD again to update its status
if err := r.Get(ctx, configKey, &config); err != nil {
log.Error(err, "Failed to get NodeTainterConfig for status update", "configName", GlobalTaintConfigName)
// Cannot update the status without the CRD
return fmt.Errorf("failed to get config %s for status update: %w", GlobalTaintConfigName, err)
}
configCopy := config.DeepCopy()
// Find the status entry for the current node
found := false
nodeStatus := configv1alpha1.NodeTaintInfo{
NodeName: node.Name,
@@ -492,12 +466,10 @@ func (r *NodeTainterConfigReconciler) updateNodeTaintStatus(ctx context.Context,
configCopy.Status.NodeTaintStatus = append(configCopy.Status.NodeTaintStatus, nodeStatus)
}
// Sort for consistency
sort.Slice(configCopy.Status.NodeTaintStatus, func(i, j int) bool {
return configCopy.Status.NodeTaintStatus[i].NodeName < configCopy.Status.NodeTaintStatus[j].NodeName
})
// Patch the status
if err := r.Status().Patch(ctx, configCopy, client.MergeFrom(&config)); err != nil {
log.Error(err, "Failed to patch NodeTainterConfig status with node info", "node", node.Name)
return err

View File

@@ -0,0 +1,163 @@
// internal/controller/podcrash_controller.go
package controller
import (
"context"
"fmt"
appsv1 "k8s.io/api/apps/v1"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/types"
"k8s.io/client-go/tools/record"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
configv1alpha1 "git.vendetti.ru/andy/operator/api/v1alpha1"
)
// PodCrashReconciler reconciles Pods to detect and handle CrashLoopBackOff state.
type PodCrashReconciler struct {
client.Client
Scheme *runtime.Scheme
Recorder record.EventRecorder
}
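// RBAC: the controller reads and deletes Pods, reads the global NodeTainterConfig,
// emits Events, and reads ReplicaSets/Deployments to resolve pod ownership.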
// +kubebuilder:rbac:groups="",resources=pods,verbs=get;list;watch;delete
// +kubebuilder:rbac:groups=operator.andy.vendetti.ru,resources=nodetainterconfigs,verbs=get;list;watch
// +kubebuilder:rbac:groups="",resources=events,verbs=create;patch
// +kubebuilder:rbac:groups=apps,resources=replicasets,verbs=get;list;watch
// +kubebuilder:rbac:groups=apps,resources=deployments,verbs=get;list;watch
func (r *PodCrashReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
log := log.FromContext(ctx).WithValues("pod", req.NamespacedName)
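// Fetch the global NodeTainterConfig; the crash loop policy lives in its spec.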
var config configv1alpha1.NodeTainterConfig
configKey := types.NamespacedName{Name: GlobalTaintConfigName}
if err := r.Get(ctx, configKey, &config); err != nil {
if !errors.IsNotFound(err) {
log.Error(err, "Failed to get NodeTainterConfig for crash loop policy", "configName", GlobalTaintConfigName)
return ctrl.Result{}, err // Requeue on real error
}
log.V(1).Info("Global NodeTainterConfig not found, crash loop handling skipped.", "configName", GlobalTaintConfigName)
return ctrl.Result{}, nil
}
if config.Spec.CrashLoopPolicy == nil || !config.Spec.CrashLoopPolicy.Enabled {
log.V(1).Info("Crash loop policy is disabled in NodeTainterConfig.")
return ctrl.Result{}, nil
}
policy := config.Spec.CrashLoopPolicy
if len(policy.MonitoredDeployments) == 0 {
log.V(1).Info("No monitored deployments configured in CrashLoopPolicy.")
return ctrl.Result{}, nil
}
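// Build a lookup set of monitored deployments, keyed as "namespace/name".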
monitoredSet := make(map[string]struct{}, len(policy.MonitoredDeployments))
for _, item := range policy.MonitoredDeployments {
monitoredSet[item] = struct{}{}
}
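// Fetch the Pod that triggered this reconcile request.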
var pod corev1.Pod
if err := r.Get(ctx, req.NamespacedName, &pod); err != nil {
if errors.IsNotFound(err) {
log.Info("Pod not found. Ignoring.")
return ctrl.Result{}, nil
}
log.Error(err, "Failed to get Pod")
return ctrl.Result{}, err // Requeue on error
}
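// Skip pods that are not owned (via a ReplicaSet) by one of the monitored Deployments.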
ownerDeploymentName, isOwnedByMonitoredDeployment := r.getOwnerDeploymentIfMonitored(ctx, &pod, monitoredSet)
if !isOwnedByMonitoredDeployment {
log.V(1).Info("Pod is not owned by a monitored Deployment, skipping.")
return ctrl.Result{}, nil
}
log = log.WithValues("deployment", ownerDeploymentName)
podShouldBeDeleted := false
var crashingContainerName string
var restartCount int32
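// Scan container statuses for CrashLoopBackOff with a restart count at or above the configured threshold.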
for _, status := range pod.Status.ContainerStatuses {
if status.State.Waiting != nil && status.State.Waiting.Reason == "CrashLoopBackOff" {
if status.RestartCount >= policy.RestartThreshold {
podShouldBeDeleted = true
crashingContainerName = status.Name
restartCount = status.RestartCount
log.Info("Pod needs deletion due to CrashLoopBackOff threshold",
"container", crashingContainerName,
"restarts", restartCount,
"threshold", policy.RestartThreshold)
break
} else {
log.V(1).Info("Container in CrashLoopBackOff but restart count below threshold",
"container", status.Name,
"restarts", status.RestartCount,
"threshold", policy.RestartThreshold)
}
}
}
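// Delete the pod and let its ReplicaSet create a replacement, which may land on a different node.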
if podShouldBeDeleted {
log.Info("Deleting pod to attempt rescheduling", "reason", "CrashLoopBackOff threshold reached")
err := r.Delete(ctx, &pod)
if err != nil {
if errors.IsNotFound(err) || errors.IsConflict(err) {
log.Info("Pod likely already deleted or being deleted.")
return ctrl.Result{}, nil
}
log.Error(err, "Failed to delete pod in CrashLoopBackOff")
r.Recorder.Eventf(&pod, corev1.EventTypeWarning, "DeleteFailed", "Failed to delete pod (%s/%s) stuck in CrashLoopBackOff: %v", pod.Namespace, pod.Name, err)
return ctrl.Result{}, err // Requeue on deletion error
}
log.Info("Pod deleted successfully.")
r.Recorder.Eventf(&pod, corev1.EventTypeNormal, "PodDeleted", "Deleted pod (%s/%s) stuck in CrashLoopBackOff (container: %s, restarts: %d)", pod.Namespace, pod.Name, crashingContainerName, restartCount)
}
return ctrl.Result{}, nil
}
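// getOwnerDeploymentIfMonitored walks the ownership chain Pod -> ReplicaSet -> Deployment and
// returns the owning Deployment as "namespace/name" plus whether it is in the monitored set.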
func (r *PodCrashReconciler) getOwnerDeploymentIfMonitored(ctx context.Context, pod *corev1.Pod, monitoredSet map[string]struct{}) (string, bool) {
log := log.FromContext(ctx).WithValues("pod", client.ObjectKeyFromObject(pod))
rsOwnerRef := metav1.GetControllerOf(pod)
if rsOwnerRef == nil || rsOwnerRef.APIVersion != appsv1.SchemeGroupVersion.String() || rsOwnerRef.Kind != "ReplicaSet" {
return "", false
}
var rs appsv1.ReplicaSet
rsKey := types.NamespacedName{Namespace: pod.Namespace, Name: rsOwnerRef.Name}
if err := r.Get(ctx, rsKey, &rs); err != nil {
log.V(1).Error(err, "Failed to get owner ReplicaSet", "replicaset", rsKey)
return "", false
}
depOwnerRef := metav1.GetControllerOf(&rs)
if depOwnerRef == nil || depOwnerRef.APIVersion != appsv1.SchemeGroupVersion.String() || depOwnerRef.Kind != "Deployment" {
return "", false
}
deploymentName := fmt.Sprintf("%s/%s", pod.Namespace, depOwnerRef.Name)
if _, exists := monitoredSet[deploymentName]; exists {
return deploymentName, true
}
return deploymentName, false
}
// SetupWithManager sets up the controller with the Manager.
func (r *PodCrashReconciler) SetupWithManager(mgr ctrl.Manager) error {
r.Recorder = mgr.GetEventRecorderFor("podcrash-controller")
return ctrl.NewControllerManagedBy(mgr).
Named("podcrash").
For(&corev1.Pod{}).
// Watches(
// &configv1alpha1.NodeTainterConfig{},
// handler.EnqueueRequestsFromMapFunc(r.mapConfigToPods),
// ).
Complete(r)
}