Terminal transcript: troubleshooting a crash-looping kube-apiserver behind an HAProxy load balancer (port 7443 → control-plane nodes on 6443) on ALT Linux host k8s-srv1. Commands and output are reproduced verbatim; long journal lines are clipped at terminal width («>»).


[altlinux@k8s-srv1 ~]$ sudo cat /etc/haproxy/haproxy.cfg | grep -A10 -B5 7443
    timeout connect 5s
    timeout client 50s
    timeout server 50s

frontend k8s-api
    bind *:7443
    default_backend k8s-nodes

backend k8s-nodes
    balance roundrobin
    server k8s-srv1 192.168.0.201:6443
    server k8s-srv2 192.168.0.202:6443
    server k8s-srv3 192.168.0.203:6443

listen stats
    bind *:9000
[altlinux@k8s-srv1 ~]$ sudo systemctl status haproxy
● haproxy.service - HAProxy Load Balancer
     Loaded: loaded (/usr/lib/systemd/system/haproxy.service; enabled; preset: disabled)
     Active: active (running) since Thu 2026-04-16 05:30:59 UTC; 1h 11min ago
 Invocation: 0fd16c60ee5142c788b26cbaf1481a49
       Docs: man:haproxy(1)
   Main PID: 10496 (haproxy)
     Status: "Ready."
      Tasks: 3 (limit: 2333)
     Memory: 46.7M (peak: 47.2M)
        CPU: 190ms
     CGroup: /system.slice/haproxy.service
             ├─10496 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -f /etc/haproxy/conf.d -p /run/haproxy.pi>
             └─10499 /usr/sbin/haproxy -Ws -f /etc/haproxy/haproxy.cfg -f /etc/haproxy/conf.d -p /run/haproxy.pi>

апр 16 06:41:08 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:40:57.971] stats stats/<STATS> 0/0/10>
апр 16 06:41:14 k8s-srv1 haproxy[10499]: 192.168.0.201:55530 [16/Apr/2026:06:41:11.359] k8s-api k8s-nodes/k8s-sr>
апр 16 06:41:18 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:41:08.057] stats stats/<STATS> 0/0/10>
апр 16 06:41:22 k8s-srv1 haproxy[10499]: 192.168.0.201:52276 [16/Apr/2026:06:41:19.915] k8s-api k8s-nodes/k8s-sr>
апр 16 06:41:28 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:41:18.113] stats stats/<STATS> 0/0/10>
апр 16 06:41:38 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:41:28.179] stats stats/<STATS> 0/0/10>
апр 16 06:41:48 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:41:38.243] stats stats/<STATS> 0/0/10>
апр 16 06:41:58 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:41:48.735] stats stats/<STATS> 0/0/10>
апр 16 06:42:08 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:41:58.793] stats stats/<STATS> 0/0/10>
апр 16 06:42:18 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:42:08.852] stats stats/<STATS> 0/0/10>
[altlinux@k8s-srv1 ~]$ sudo journalctl -xeu haproxy | tail -20
апр 16 06:39:36 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:39:26.721] stats stats/<STATS> 0/0/10117 22938 LR 1/1/0/0/0 0/0
апр 16 06:39:47 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:39:36.838] stats stats/<STATS> 0/0/10382 22938 LR 1/1/0/0/0 0/0
апр 16 06:39:57 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:39:47.220] stats stats/<STATS> 0/0/10372 22938 LR 1/1/0/0/0 0/0
апр 16 06:40:07 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:39:57.593] stats stats/<STATS> 0/0/10048 22937 LR 1/1/0/0/0 0/0
апр 16 06:40:17 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:40:07.641] stats stats/<STATS> 0/0/10051 22943 LR 1/1/0/0/0 0/0
апр 16 06:40:27 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:40:17.692] stats stats/<STATS> 0/0/10063 22946 LR 1/1/0/0/0 0/0
апр 16 06:40:37 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:40:27.756] stats stats/<STATS> 0/0/10070 22946 LR 1/1/0/0/0 0/0
апр 16 06:40:47 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:40:37.826] stats stats/<STATS> 0/0/10069 22946 LR 1/1/0/0/0 0/0
апр 16 06:40:57 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:40:47.896] stats stats/<STATS> 0/0/10074 22946 LR 1/1/0/0/0 0/0
апр 16 06:41:08 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:40:57.971] stats stats/<STATS> 0/0/10086 22947 LR 1/1/0/0/0 0/0
апр 16 06:41:14 k8s-srv1 haproxy[10499]: 192.168.0.201:55530 [16/Apr/2026:06:41:11.359] k8s-api k8s-nodes/k8s-srv2 1/-1/3006 0 SC 2/1/0/0/3 0/0
апр 16 06:41:18 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:41:08.057] stats stats/<STATS> 0/0/10055 22940 LR 1/1/0/0/0 0/0
апр 16 06:41:22 k8s-srv1 haproxy[10499]: 192.168.0.201:52276 [16/Apr/2026:06:41:19.915] k8s-api k8s-nodes/k8s-srv3 1/-1/3006 0 SC 2/1/0/0/3 0/0
апр 16 06:41:28 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:41:18.113] stats stats/<STATS> 0/0/10066 22940 LR 1/1/0/0/0 0/0
апр 16 06:41:38 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:41:28.179] stats stats/<STATS> 0/0/10063 22942 LR 1/1/0/0/0 0/0
апр 16 06:41:48 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:41:38.243] stats stats/<STATS> 0/0/10491 22942 LR 1/1/0/0/0 0/0
апр 16 06:41:58 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:41:48.735] stats stats/<STATS> 0/0/10057 22942 LR 1/1/0/0/0 0/0
апр 16 06:42:08 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:41:58.793] stats stats/<STATS> 0/0/10059 22942 LR 1/1/0/0/0 0/0
апр 16 06:42:18 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:42:08.852] stats stats/<STATS> 0/0/10061 22942 LR 1/1/0/0/0 0/0
апр 16 06:42:28 k8s-srv1 haproxy[10499]: 192.168.0.1:32906 [16/Apr/2026:06:42:18.914] stats stats/<STATS> 0/0/10074 22946 LR 1/1/0/0/0 0/0
[altlinux@k8s-srv1 ~]$ ls -la /etc/kubernetes/manifests/
итого 24
drwxrwxr-x 2 root kube 4096 апр 16 05:50 .
drwxrwxr-x 4 root kube 4096 апр 16 05:50 ..
-rw------- 1 root root 2602 апр 16 05:50 etcd.yaml
-rw------- 1 root root 3891 апр 16 05:50 kube-apiserver.yaml
-rw------- 1 root root 3390 апр 16 05:50 kube-controller-manager.yaml
-rw------- 1 root root 1726 апр 16 05:50 kube-scheduler.yaml
[altlinux@k8s-srv1 ~]$ sudo crictl --runtime-endpoint unix:///var/run/containerd/containerd.sock ps -a | grep kube-apiserver
d3ba3b0a80cd1       5c6acd67e9cd1       20 seconds ago      Running             kube-apiserver            148                 f91c5eb375244       kube-apiserver-k8s-srv1            kube-system
b322897294671       5c6acd67e9cd1       58 seconds ago      Exited              kube-apiserver            147                 f91c5eb375244       kube-apiserver-k8s-srv1            kube-system
[altlinux@k8s-srv1 ~]$ sudo crictl logs d3ba3b0a80cd1
I0416 06:42:23.047205       1 options.go:263] external host was not specified, using 192.168.0.201
I0416 06:42:23.048652       1 server.go:150] Version: v1.35.0
I0416 06:42:23.048670       1 server.go:152] "Golang settings" GOGC="" GOMAXPROCS="" GOTRACEBACK=""
W0416 06:42:23.382127       1 logging.go:55] [core] [Channel #1 SubChannel #2]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
W0416 06:42:23.384550       1 logging.go:55] [core] [Channel #4 SubChannel #5]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
I0416 06:42:23.384902       1 shared_informer.go:349] "Waiting for caches to sync" controller="node_authorizer"
I0416 06:42:23.385327       1 shared_informer.go:370] "Waiting for caches to sync"
I0416 06:42:23.391749       1 plugins.go:157] Loaded 14 mutating admission controller(s) successfully in the following order: NamespaceLifecycle,LimitRanger,ServiceAccount,NodeRestriction,TaintNodesByCondition,Priority,DefaultTolerationSeconds,DefaultStorageClass,StorageObjectInUseProtection,RuntimeClass,DefaultIngressClass,PodTopologyLabels,MutatingAdmissionPolicy,MutatingAdmissionWebhook.
I0416 06:42:23.391771       1 plugins.go:160] Loaded 14 validating admission controller(s) successfully in the following order: LimitRanger,ServiceAccount,PodSecurity,Priority,PersistentVolumeClaimResize,RuntimeClass,CertificateApproval,CertificateSigning,ClusterTrustBundleAttest,CertificateSubjectRestriction,NodeDeclaredFeatureValidator,ValidatingAdmissionPolicy,ValidatingAdmissionWebhook,ResourceQuota.
I0416 06:42:23.391935       1 instance.go:240] Using reconciler: lease
W0416 06:42:23.396330       1 logging.go:55] [core] [Channel #7 SubChannel #10]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: authentication handshake failed: context canceled"
W0416 06:42:23.401314       1 logging.go:55] [core] [Channel #13 SubChannel #14]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
W0416 06:42:23.408760       1 logging.go:55] [core] [Channel #21 SubChannel #22]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
I0416 06:42:23.418295       1 handler.go:304] Adding GroupVersion apiextensions.k8s.io v1 to ResourceManager
W0416 06:42:23.418311       1 genericapiserver.go:787] Skipping API apiextensions.k8s.io/v1beta1 because it has no resources.
I0416 06:42:23.422256       1 cidrallocator.go:198] starting ServiceCIDR Allocator Controller
W0416 06:42:23.422920       1 logging.go:55] [core] [Channel #27 SubChannel #28]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
W0416 06:42:23.428089       1 logging.go:55] [core] [Channel #31 SubChannel #32]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"
W0416 06:42:23.433177       1 logging.go:55] [core] [Channel #35 SubChannel #36]grpc: addrConn.createTransport failed to connect to {Addr: "127.0.0.1:2379", ServerName: "127.0.0.1:2379", BalancerAttributes: {"<%!p(pickfirstleaf.managedByPickfirstKeyType={})>": "<%!p(bool=true)>" }}. Err: connection error: desc = "transport: Error while dialing: dial tcp 127.0.0.1:2379: operation was canceled"

[altlinux@k8s-srv1 ~]$ sudo systemctl stop haproxy
[altlinux@k8s-srv1 ~]$ sudo ss -tlnp | grep 7443
[altlinux@k8s-srv1 ~]$ curl -k https://192.168.0.253:7443/healthz
curl: (7) Failed to connect to 192.168.0.253 port 7443 after 0 ms: Could not connect to server
[altlinux@k8s-srv1 ~]$ sudo sed -i 's/clusterDNS: \[169.254.25.10\]/clusterDNS: \[10.96.0.10\]/' /etc/
sed: невозможно редактировать /etc/: это не обычный файл
[altlinux@k8s-srv1 ~]$ sudo sed -i 's/clusterDNS: \[169.254.25.10\]/clusterDNS: \[10.96.0.10\]/' /etc/kubernetes/kubeadm-config.yaml
[altlinux@k8s-srv1 ~]$