root@sbat1:~# journalctl | grep error
Jun 10 08:30:37 sbat1 kernel: EXT4-fs (dm-0): re-mounted. Opts: errors=remount-ro
Jun 10 08:30:41 sbat1 gpu-manager[1540]: update-alternatives: error: no alternatives for x86_64-linux-gnu_gfxcore_conf
Jun 10 08:31:11 sbat1 containerd[1879]: time="2019-06-10T08:31:11.195930015-04:00" level=warning msg="failed to load plugin io.containerd.snapshotter.v1.btrfs" error="path /var/lib/containerd/io.containerd.snapshotter.v1.btrfs must be a btrfs filesystem to be used with the btrfs snapshotter"
Jun 10 08:31:11 sbat1 dockerd[1887]: time="2019-06-10T08:31:11.224329234-04:00" level=warning msg="failed to load plugin io.containerd.snapshotter.v1.btrfs" error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.btrfs must be a btrfs filesystem to be used with the btrfs snapshotter"
Jun 10 08:31:11 sbat1 dockerd[1887]: time="2019-06-10T08:31:11.236371340-04:00" level=warning msg="failed to load plugin io.containerd.snapshotter.v1.zfs" error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.zfs must be a zfs filesystem to be used with the zfs snapshotter"
Jun 10 08:31:11 sbat1 dockerd[1887]: time="2019-06-10T08:31:11.236421218-04:00" level=warning msg="could not use snapshotter btrfs in metadata plugin" error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.btrfs must be a btrfs filesystem to be used with the btrfs snapshotter"
Jun 10 08:31:11 sbat1 dockerd[1887]: time="2019-06-10T08:31:11.236431453-04:00" level=warning msg="could not use snapshotter zfs in metadata plugin" error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.zfs must be a zfs filesystem to be used with the zfs snapshotter"
Jun 10 08:31:11 sbat1 containerd[1879]: time="2019-06-10T08:31:11.243868899-04:00" level=warning msg="failed to load plugin io.containerd.snapshotter.v1.zfs" error="path /var/lib/containerd/io.containerd.snapshotter.v1.zfs must be a zfs filesystem to be used with the zfs snapshotter"
Jun 10 08:31:11 sbat1 containerd[1879]: time="2019-06-10T08:31:11.243909678-04:00" level=warning msg="could not use snapshotter zfs in metadata plugin" error="path /var/lib/containerd/io.containerd.snapshotter.v1.zfs must be a zfs filesystem to be used with the zfs snapshotter"
Jun 10 08:31:11 sbat1 containerd[1879]: time="2019-06-10T08:31:11.243917657-04:00" level=warning msg="could not use snapshotter btrfs in metadata plugin" error="path /var/lib/containerd/io.containerd.snapshotter.v1.btrfs must be a btrfs filesystem to be used with the btrfs snapshotter"
Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.707961    2901 controller.go:115] failed to ensure node lease exists, will retry in 200ms, error: Get https://135.25.24.167:6443/apis/coordination.k8s.io/v1beta1/namespaces/kube-node-lease/leases/sbat1?timeout=10s: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.908334    2901 controller.go:115] failed to ensure node lease exists, will retry in 400ms, error: Get https://135.25.24.167:6443/apis/coordination.k8s.io/v1beta1/namespaces/kube-node-lease/leases/sbat1?timeout=10s: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.007579    2901 remote_runtime.go:271] RemoveContainer "d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977": Error response from daemon: removal of container d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977 is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.010421    2901 remote_runtime.go:271] RemoveContainer "d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977": Error response from daemon: removal of container d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977 is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.244976    2901 remote_runtime.go:271] RemoveContainer "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c": Error response from daemon: removal of container cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.245000    2901 kuberuntime_gc.go:142] Failed to remove container "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c": rpc error: code = Unknown desc = failed to remove container "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c": Error response from daemon: removal of container cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.297311    2901 remote_runtime.go:321] ContainerStatus "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c" from runtime service failed: rpc error: code = Unknown desc = Error: No such container: cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.308693    2901 controller.go:115] failed to ensure node lease exists, will retry in 800ms, error: Get https://135.25.24.167:6443/apis/coordination.k8s.io/v1beta1/namespaces/kube-node-lease/leases/sbat1?timeout=10s: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.405479    2901 remote_runtime.go:321] ContainerStatus "e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac" from runtime service failed: rpc error: code = Unknown desc = Error: No such container: e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.405878    2901 remote_runtime.go:321] ContainerStatus "d35b3bfee9de3f7222b6e136e754fe11f6aa87a1343f646a4fa0edd1ed9e7531" from runtime service failed: rpc error: code = Unknown desc = Error: No such container: d35b3bfee9de3f7222b6e136e754fe11f6aa87a1343f646a4fa0edd1ed9e7531
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.409141    2901 remote_runtime.go:271] RemoveContainer "c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424": Error response from daemon: removal of container c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424 is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.411959    2901 remote_runtime.go:271] RemoveContainer "c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424": Error response from daemon: removal of container c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424 is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.561944    2901 remote_runtime.go:271] RemoveContainer "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e": Error response from daemon: removal of container ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.561976    2901 kuberuntime_gc.go:142] Failed to remove container "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e": rpc error: code = Unknown desc = failed to remove container "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e": Error response from daemon: removal of container ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.614628    2901 remote_runtime.go:321] ContainerStatus "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e" from runtime service failed: rpc error: code = Unknown desc = Error: No such container: ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.797763    2901 remote_runtime.go:271] RemoveContainer "a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff": Error response from daemon: removal of container a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.800602    2901 remote_runtime.go:271] RemoveContainer "a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff": Error response from daemon: removal of container a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.802025    2901 kuberuntime_gc.go:142] Failed to remove container "e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac": failed to get container status "e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac": rpc error: code = Unknown desc = Error: No such container: e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac
Jun 10 08:31:27 sbat1 kubelet[2901]: E0610 08:31:27.109239    2901 controller.go:115] failed to ensure node lease exists, will retry in 1.6s, error: Get https://135.25.24.167:6443/apis/coordination.k8s.io/v1beta1/namespaces/kube-node-lease/leases/sbat1?timeout=10s: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:28 sbat1 kubelet[2901]: E0610 08:31:28.709694    2901 controller.go:115] failed to ensure node lease exists, will retry in 3.2s, error: Get https://135.25.24.167:6443/apis/coordination.k8s.io/v1beta1/namespaces/kube-node-lease/leases/sbat1?timeout=10s: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:37 sbat1 kubelet[2901]: 2019-06-10 08:31:37.328 [WARNING][4618] ipam.go 1101: CAS error for block, retry #0: update conflict: BlockKey(cidr=10.233.70.0/24) cidr=10.233.70.0/24 handle="cni0.e9b9b0a1fb6f7bbcb8b68eca3b8e22fa2ad0a9601cfaa5709fe242ed825c5c56"
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.331394    2901 remote_runtime.go:132] StopPodSandbox "fb3b67a43bbffd3cac22666ef8a44f1df162b21bcaa66bdac8d790db17c994e9" from runtime service failed: rpc error: code = Unknown desc = NetworkPlugin cni failed to teardown pod "voltha-etcd-operator-etcd-restore-operator-6777b84b45-qbbwr_voltha" network: fork/exec /opt/cni/bin/calico: text file busy
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.331490    2901 kuberuntime_manager.go:641] killPodWithSyncResult failed: failed to "KillPodSandbox" for "5e38b391-8952-11e9-8921-288023a0c20c" with KillPodSandboxError: "rpc error: code = Unknown desc = NetworkPlugin cni failed to teardown pod \"voltha-etcd-operator-etcd-restore-operator-6777b84b45-qbbwr_voltha\" network: fork/exec /opt/cni/bin/calico: text file busy"
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.331557    2901 pod_workers.go:190] Error syncing pod 5e38b391-8952-11e9-8921-288023a0c20c ("voltha-etcd-operator-etcd-restore-operator-6777b84b45-qbbwr_voltha(5e38b391-8952-11e9-8921-288023a0c20c)"), skipping: failed to "KillPodSandbox" for "5e38b391-8952-11e9-8921-288023a0c20c" with KillPodSandboxError: "rpc error: code = Unknown desc = NetworkPlugin cni failed to teardown pod \"voltha-etcd-operator-etcd-restore-operator-6777b84b45-qbbwr_voltha\" network: fork/exec /opt/cni/bin/calico: text file busy"
Jun 10 08:31:38 sbat1 kubelet[2901]: E0610 08:31:38.031593    2901 remote_runtime.go:109] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = [failed to set up sandbox container "307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad" network for pod "dns-autoscaler-56c969bdb8-pwshs": NetworkPlugin cni failed to set up pod "dns-autoscaler-56c969bdb8-pwshs_kube-system" network: failed to find plugin "calico" in path [/opt/cni/bin], failed to clean up sandbox container "307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad" network for pod "dns-autoscaler-56c969bdb8-pwshs": NetworkPlugin cni failed to teardown pod "dns-autoscaler-56c969bdb8-pwshs_kube-system" network: failed to find plugin "calico" in path [/opt/cni/bin]]
Jun 10 08:31:38 sbat1 kubelet[2901]: E0610 08:31:38.031658    2901 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dns-autoscaler-56c969bdb8-pwshs_kube-system(c87d4f3f-7b1a-11e9-a372-40a8f029cae8)" failed: rpc error: code = Unknown desc = [failed to set up sandbox container "307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad" network for pod "dns-autoscaler-56c969bdb8-pwshs": NetworkPlugin cni failed to set up pod "dns-autoscaler-56c969bdb8-pwshs_kube-system" network: failed to find plugin "calico" in path [/opt/cni/bin], failed to clean up sandbox container "307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad" network for pod "dns-autoscaler-56c969bdb8-pwshs": NetworkPlugin cni failed to teardown pod "dns-autoscaler-56c969bdb8-pwshs_kube-system" network: failed to find plugin "calico" in path [/opt/cni/bin]]
Jun 10 08:31:38 sbat1 kubelet[2901]: E0610 08:31:38.031682    2901 kuberuntime_manager.go:693] createPodSandbox for pod "dns-autoscaler-56c969bdb8-pwshs_kube-system(c87d4f3f-7b1a-11e9-a372-40a8f029cae8)" failed: rpc error: code = Unknown desc = [failed to set up sandbox container "307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad" network for pod "dns-autoscaler-56c969bdb8-pwshs": NetworkPlugin cni failed to set up pod "dns-autoscaler-56c969bdb8-pwshs_kube-system" network: failed to find plugin "calico" in path [/opt/cni/bin], failed to clean up sandbox container "307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad" network for pod "dns-autoscaler-56c969bdb8-pwshs": NetworkPlugin cni failed to teardown pod "dns-autoscaler-56c969bdb8-pwshs_kube-system" network: failed to find plugin "calico" in path [/opt/cni/bin]]
Jun 10 08:31:38 sbat1 kubelet[2901]: E0610 08:31:38.031766    2901 pod_workers.go:190] Error syncing pod c87d4f3f-7b1a-11e9-a372-40a8f029cae8 ("dns-autoscaler-56c969bdb8-pwshs_kube-system(c87d4f3f-7b1a-11e9-a372-40a8f029cae8)"), skipping: failed to "CreatePodSandbox" for "dns-autoscaler-56c969bdb8-pwshs_kube-system(c87d4f3f-7b1a-11e9-a372-40a8f029cae8)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dns-autoscaler-56c969bdb8-pwshs_kube-system(c87d4f3f-7b1a-11e9-a372-40a8f029cae8)\" failed: rpc error: code = Unknown desc = [failed to set up sandbox container \"307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad\" network for pod \"dns-autoscaler-56c969bdb8-pwshs\": NetworkPlugin cni failed to set up pod \"dns-autoscaler-56c969bdb8-pwshs_kube-system\" network: failed to find plugin \"calico\" in path [/opt/cni/bin], failed to clean up sandbox container \"307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad\" network for pod \"dns-autoscaler-56c969bdb8-pwshs\": NetworkPlugin cni failed to teardown pod \"dns-autoscaler-56c969bdb8-pwshs_kube-system\" network: failed to find plugin \"calico\" in path [/opt/cni/bin]]"
Jun 10 08:31:39 sbat1 kubelet[2901]: E0610 08:31:39.423934    2901 remote_runtime.go:109] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to set up sandbox container "d21a5cd97a7c6002adfe22e4c2daab568695583ffe20f10a804ad5a0b0891f47" network for pod "adapter-open-onu-7b86fc69f6-xgrm8": NetworkPlugin cni failed to set up pod "adapter-open-onu-7b86fc69f6-xgrm8_voltha" network: fork/exec /opt/cni/bin/loopback: text file busy
Jun 10 08:31:39 sbat1 kubelet[2901]: E0610 08:31:39.423988    2901 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "adapter-open-onu-7b86fc69f6-xgrm8_voltha(e57265d8-8952-11e9-8921-288023a0c20c)" failed: rpc error: code = Unknown desc = failed to set up sandbox container "d21a5cd97a7c6002adfe22e4c2daab568695583ffe20f10a804ad5a0b0891f47" network for pod "adapter-open-onu-7b86fc69f6-xgrm8": NetworkPlugin cni failed to set up pod "adapter-open-onu-7b86fc69f6-xgrm8_voltha" network: fork/exec /opt/cni/bin/loopback: text file busy
Jun 10 08:31:39 sbat1 kubelet[2901]: E0610 08:31:39.424009    2901 kuberuntime_manager.go:693] createPodSandbox for pod "adapter-open-onu-7b86fc69f6-xgrm8_voltha(e57265d8-8952-11e9-8921-288023a0c20c)" failed: rpc error: code = Unknown desc = failed to set up sandbox container "d21a5cd97a7c6002adfe22e4c2daab568695583ffe20f10a804ad5a0b0891f47" network for pod "adapter-open-onu-7b86fc69f6-xgrm8": NetworkPlugin cni failed to set up pod "adapter-open-onu-7b86fc69f6-xgrm8_voltha" network: fork/exec /opt/cni/bin/loopback: text file busy
Jun 10 08:31:39 sbat1 kubelet[2901]: E0610 08:31:39.424075    2901 pod_workers.go:190] Error syncing pod e57265d8-8952-11e9-8921-288023a0c20c ("adapter-open-onu-7b86fc69f6-xgrm8_voltha(e57265d8-8952-11e9-8921-288023a0c20c)"), skipping: failed to "CreatePodSandbox" for "adapter-open-onu-7b86fc69f6-xgrm8_voltha(e57265d8-8952-11e9-8921-288023a0c20c)" with CreatePodSandboxError: "CreatePodSandbox for pod \"adapter-open-onu-7b86fc69f6-xgrm8_voltha(e57265d8-8952-11e9-8921-288023a0c20c)\" failed: rpc error: code = Unknown desc = failed to set up sandbox container \"d21a5cd97a7c6002adfe22e4c2daab568695583ffe20f10a804ad5a0b0891f47\" network for pod \"adapter-open-onu-7b86fc69f6-xgrm8\": NetworkPlugin cni failed to set up pod \"adapter-open-onu-7b86fc69f6-xgrm8_voltha\" network: fork/exec /opt/cni/bin/loopback: text file busy"
Jun 10 08:31:41 sbat1 kubelet[2901]: 2019-06-10 08:31:41.652 [INFO][6555] ipam.go 720: Failed to update block block=10.233.70.0/24 error=update conflict: BlockKey(cidr=10.233.70.0/24) handle="cni0.61b303fe4fbc6cdd92780e291655ea9f4a0db62a3cebd8e65b08a082c48424ea" host="sbat1"
root@sbat1:~# journalctl | grep Error
Jun 10 08:30:37 sbat1 kernel: Error parsing PCC subspaces from PCCT
Jun 10 08:30:37 sbat1 kernel: ERST: Error Record Serialization Table (ERST) support is initialized.
Jun 10 08:30:41 sbat1 gpu-manager[1540]: Error: can't open /lib/modules/4.4.0-150-generic/updates/dkms
Jun 10 08:30:41 sbat1 gpu-manager[1540]: Error: can't open /lib/modules/4.4.0-150-generic/updates/dkms
Jun 10 08:30:42 sbat1 NetworkManager[1561]: [1560169842.3152] failed to enumerate oFono devices: GDBus.Error:org.freedesktop.DBus.Error.ServiceUnknown: The name org.ofono was not provided by any .service files
Jun 10 08:31:25 sbat1 kubelet[2901]: I0610 08:31:25.605945    2901 docker_service.go:258] Docker Info: &{ID:YM4N:CMF6:4BPL:NFJO:VQWN:YKSB:QCY3:SX6D:UPFB:VQBW:NCJT:XWR2 Containers:55 ContainersRunning:1 ContainersPaused:0 ContainersStopped:54 Images:71 Driver:overlay2 DriverStatus:[[Backing Filesystem extfs] [Supports d_type true] [Native Overlay Diff true]] SystemStatus:[] Plugins:{Volume:[local] Network:[bridge host macvlan null overlay] Authorization:[] Log:[awslogs fluentd gcplogs gelf journald json-file local logentries splunk syslog]} MemoryLimit:true SwapLimit:false KernelMemory:true CPUCfsPeriod:true CPUCfsQuota:true CPUShares:true CPUSet:true IPv4Forwarding:true BridgeNfIptables:true BridgeNfIP6tables:true Debug:false NFd:33 OomKillDisable:true NGoroutines:60 SystemTime:2019-06-10T08:31:25.590657019-04:00 LoggingDriver:json-file CgroupDriver:cgroupfs NEventsListener:0 KernelVersion:4.4.0-150-generic OperatingSystem:Ubuntu 16.04.6 LTS OSType:linux Architecture:x86_64 IndexServerAddress:https://index.docker.io/v1/ RegistryConfig:0xc0002d8230 NCPU:40 MemTotal:270453170176 GenericResources:[] DockerRootDir:/var/lib/docker HTTPProxy:http://one.proxy.att.com:8888 HTTPSProxy:http://one.proxy.att.com:8888 NoProxy:135.25.24.167,sbat1,sbat1.cluster.local,135.25.24.157,sbat2,sbat2.cluster.local,135.25.24.151,sbat3,sbat3.cluster.local,127.0.0.1,localhost Name:sbat1 Labels:[] ExperimentalBuild:false ServerVersion:18.09.6 ClusterStore: ClusterAdvertise: Runtimes:map[runc:{Path:runc Args:[]}] DefaultRuntime:runc Swarm:{NodeID: NodeAddr: LocalNodeState:inactive ControlAvailable:false Error: RemoteManagers:[] Nodes:0 Managers:0 Cluster:} LiveRestoreEnabled:false Isolation: InitBinary:docker-init ContainerdCommit:{ID:bb71b10fd8f58240ca47fbb579b9d1028eea7c84 Expected:bb71b10fd8f58240ca47fbb579b9d1028eea7c84} RuncCommit:{ID:2b18fe1d885ee5083ef9f0838fee39b62d653e30 Expected:2b18fe1d885ee5083ef9f0838fee39b62d653e30} InitCommit:{ID:fec3683 Expected:fec3683} SecurityOptions:[name=apparmor name=seccomp,profile=default]}
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.007579    2901 remote_runtime.go:271] RemoveContainer "d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977": Error response from daemon: removal of container d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977 is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.010421    2901 remote_runtime.go:271] RemoveContainer "d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977": Error response from daemon: removal of container d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977 is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.244976    2901 remote_runtime.go:271] RemoveContainer "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c": Error response from daemon: removal of container cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.245000    2901 kuberuntime_gc.go:142] Failed to remove container "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c": rpc error: code = Unknown desc = failed to remove container "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c": Error response from daemon: removal of container cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.297311    2901 remote_runtime.go:321] ContainerStatus "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c" from runtime service failed: rpc error: code = Unknown desc = Error: No such container: cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.405479    2901 remote_runtime.go:321] ContainerStatus "e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac" from runtime service failed: rpc error: code = Unknown desc = Error: No such container: e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.405878    2901 remote_runtime.go:321] ContainerStatus "d35b3bfee9de3f7222b6e136e754fe11f6aa87a1343f646a4fa0edd1ed9e7531" from runtime service failed: rpc error: code = Unknown desc = Error: No such container: d35b3bfee9de3f7222b6e136e754fe11f6aa87a1343f646a4fa0edd1ed9e7531
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.409141    2901 remote_runtime.go:271] RemoveContainer "c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424": Error response from daemon: removal of container c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424 is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.411959    2901 remote_runtime.go:271] RemoveContainer "c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424": Error response from daemon: removal of container c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424 is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.561944    2901 remote_runtime.go:271] RemoveContainer "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e": Error response from daemon: removal of container ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.561976    2901 kuberuntime_gc.go:142] Failed to remove container "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e": rpc error: code = Unknown desc = failed to remove container "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e": Error response from daemon: removal of container ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.614628    2901 remote_runtime.go:321] ContainerStatus "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e" from runtime service failed: rpc error: code = Unknown desc = Error: No such container: ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.797763    2901 remote_runtime.go:271] RemoveContainer "a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff": Error response from daemon: removal of container a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.800602    2901 remote_runtime.go:271] RemoveContainer "a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff": Error response from daemon: removal of container a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.802025    2901 kuberuntime_gc.go:142] Failed to remove container "e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac": failed to get container status "e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac": rpc error: code = Unknown desc = Error: No such container: e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac
Jun 10 08:31:36 sbat1 kubelet[2901]: E0610 08:31:36.366465    2901 nestedpendingoperations.go:267] Operation for "\"kubernetes.io/secret/e57265d8-8952-11e9-8921-288023a0c20c-default-token-dsb6t\" (\"e57265d8-8952-11e9-8921-288023a0c20c\")" failed. No retries permitted until 2019-06-10 08:31:36.866432348 -0400 EDT m=+13.085894107 (durationBeforeRetry 500ms). Error: "MountVolume.SetUp failed for volume \"default-token-dsb6t\" (UniqueName: \"kubernetes.io/secret/e57265d8-8952-11e9-8921-288023a0c20c-default-token-dsb6t\") pod \"adapter-open-onu-7b86fc69f6-xgrm8\" (UID: \"e57265d8-8952-11e9-8921-288023a0c20c\") : couldn't propagate object cache: timed out waiting for the condition"
Jun 10 08:31:36 sbat1 kubelet[2901]: E0610 08:31:36.366504    2901 nestedpendingoperations.go:267] Operation for "\"kubernetes.io/secret/cf370f04-8952-11e9-8921-288023a0c20c-default-token-dsb6t\" (\"cf370f04-8952-11e9-8921-288023a0c20c\")" failed. No retries permitted until 2019-06-10 08:31:36.866479979 -0400 EDT m=+13.085941741 (durationBeforeRetry 500ms). Error: "MountVolume.SetUp failed for volume \"default-token-dsb6t\" (UniqueName: \"kubernetes.io/secret/cf370f04-8952-11e9-8921-288023a0c20c-default-token-dsb6t\") pod \"adapter-open-olt-8cf67d66b-msngz\" (UID: \"cf370f04-8952-11e9-8921-288023a0c20c\") : couldn't propagate object cache: timed out waiting for the condition"
Jun 10 08:31:36 sbat1 kubelet[2901]: W0610 08:31:36.396229    2901 raw.go:87] Error while processing event ("/sys/fs/cgroup/cpu,cpuacct/system.slice/run-r53be47ab6c3e43558400eb68a60977b9.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/cpu,cpuacct/system.slice/run-r53be47ab6c3e43558400eb68a60977b9.scope: no such file or directory
Jun 10 08:31:36 sbat1 kubelet[2901]: W0610 08:31:36.396282    2901 raw.go:87] Error while processing event ("/sys/fs/cgroup/blkio/system.slice/run-r53be47ab6c3e43558400eb68a60977b9.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/blkio/system.slice/run-r53be47ab6c3e43558400eb68a60977b9.scope: no such file or directory
Jun 10 08:31:36 sbat1 kubelet[2901]: W0610 08:31:36.396310    2901 raw.go:87] Error while processing event ("/sys/fs/cgroup/memory/system.slice/run-r53be47ab6c3e43558400eb68a60977b9.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/memory/system.slice/run-r53be47ab6c3e43558400eb68a60977b9.scope: no such file or directory
Jun 10 08:31:36 sbat1 kubelet[2901]: W0610 08:31:36.396334    2901 raw.go:87] Error while processing event ("/sys/fs/cgroup/devices/system.slice/run-r53be47ab6c3e43558400eb68a60977b9.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/devices/system.slice/run-r53be47ab6c3e43558400eb68a60977b9.scope: no such file or directory
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.176823    2901 cni.go:331] Error adding kube-system_dns-autoscaler-56c969bdb8-pwshs/307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad to network calico/cni0: failed to find plugin "calico" in path [/opt/cni/bin]
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.196547    2901 cni.go:352] Error deleting kube-system_dns-autoscaler-56c969bdb8-pwshs/307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad from network calico/cni0: failed to find plugin "calico" in path [/opt/cni/bin]
Jun 10 08:31:37 sbat1 kubelet[2901]: W0610 08:31:37.294655    2901 raw.go:87] Error while processing event ("/sys/fs/cgroup/cpu,cpuacct/system.slice/run-r3fdefd2d04464caca4bb74fb43d1d17d.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/cpu,cpuacct/system.slice/run-r3fdefd2d04464caca4bb74fb43d1d17d.scope: no such file or directory
Jun 10 08:31:37 sbat1 kubelet[2901]: W0610 08:31:37.294704    2901 raw.go:87] Error while processing event ("/sys/fs/cgroup/blkio/system.slice/run-r3fdefd2d04464caca4bb74fb43d1d17d.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/blkio/system.slice/run-r3fdefd2d04464caca4bb74fb43d1d17d.scope: no such file or directory
Jun 10 08:31:37 sbat1 kubelet[2901]: W0610 08:31:37.294733    2901 raw.go:87] Error while processing event ("/sys/fs/cgroup/memory/system.slice/run-r3fdefd2d04464caca4bb74fb43d1d17d.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/memory/system.slice/run-r3fdefd2d04464caca4bb74fb43d1d17d.scope: no such file or directory
Jun 10 08:31:37 sbat1 kubelet[2901]: W0610 08:31:37.294764    2901 raw.go:87] Error while processing event ("/sys/fs/cgroup/devices/system.slice/run-r3fdefd2d04464caca4bb74fb43d1d17d.scope": 0x40000100 == IN_CREATE|IN_ISDIR): inotify_add_watch /sys/fs/cgroup/devices/system.slice/run-r3fdefd2d04464caca4bb74fb43d1d17d.scope: no such file or directory
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.330739    2901 cni.go:352] Error deleting voltha_voltha-etcd-operator-etcd-restore-operator-6777b84b45-qbbwr/fb3b67a43bbffd3cac22666ef8a44f1df162b21bcaa66bdac8d790db17c994e9 from network calico/cni0: fork/exec /opt/cni/bin/calico: text file busy
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.331490    2901 kuberuntime_manager.go:641] killPodWithSyncResult failed: failed to "KillPodSandbox" for "5e38b391-8952-11e9-8921-288023a0c20c" with KillPodSandboxError: "rpc error: code = Unknown desc = NetworkPlugin cni failed to teardown pod \"voltha-etcd-operator-etcd-restore-operator-6777b84b45-qbbwr_voltha\" network: fork/exec /opt/cni/bin/calico: text file busy"
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.331557    2901 pod_workers.go:190] Error syncing pod 5e38b391-8952-11e9-8921-288023a0c20c ("voltha-etcd-operator-etcd-restore-operator-6777b84b45-qbbwr_voltha(5e38b391-8952-11e9-8921-288023a0c20c)"), skipping: failed to "KillPodSandbox" for "5e38b391-8952-11e9-8921-288023a0c20c" with KillPodSandboxError: "rpc error: code = Unknown desc = NetworkPlugin cni failed to teardown pod \"voltha-etcd-operator-etcd-restore-operator-6777b84b45-qbbwr_voltha\" network: fork/exec /opt/cni/bin/calico: text file busy"
Jun 10 08:31:38 sbat1 kubelet[2901]: E0610 08:31:38.031766    2901 pod_workers.go:190] Error syncing pod c87d4f3f-7b1a-11e9-a372-40a8f029cae8 ("dns-autoscaler-56c969bdb8-pwshs_kube-system(c87d4f3f-7b1a-11e9-a372-40a8f029cae8)"), skipping: failed to "CreatePodSandbox" for "dns-autoscaler-56c969bdb8-pwshs_kube-system(c87d4f3f-7b1a-11e9-a372-40a8f029cae8)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dns-autoscaler-56c969bdb8-pwshs_kube-system(c87d4f3f-7b1a-11e9-a372-40a8f029cae8)\" failed: rpc error: code = Unknown desc = [failed to set up sandbox container \"307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad\" network for pod \"dns-autoscaler-56c969bdb8-pwshs\": NetworkPlugin cni failed to set up pod \"dns-autoscaler-56c969bdb8-pwshs_kube-system\" network: failed to find plugin \"calico\" in path [/opt/cni/bin], failed to clean up sandbox container \"307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad\" network for pod \"dns-autoscaler-56c969bdb8-pwshs\": NetworkPlugin cni failed to teardown pod \"dns-autoscaler-56c969bdb8-pwshs_kube-system\" network: failed to find plugin \"calico\" in path [/opt/cni/bin]]"
Jun 10 08:31:38 sbat1 kubelet[2901]: E0610 08:31:38.945352    2901 cni.go:331] Error adding voltha_adapter-open-onu-7b86fc69f6-xgrm8/d21a5cd97a7c6002adfe22e4c2daab568695583ffe20f10a804ad5a0b0891f47 to network loopback/cni-loopback: fork/exec /opt/cni/bin/loopback: text file busy
Jun 10 08:31:39 sbat1 kubelet[2901]: E0610 08:31:39.424075    2901 pod_workers.go:190] Error syncing pod e57265d8-8952-11e9-8921-288023a0c20c ("adapter-open-onu-7b86fc69f6-xgrm8_voltha(e57265d8-8952-11e9-8921-288023a0c20c)"), skipping: failed to "CreatePodSandbox" for "adapter-open-onu-7b86fc69f6-xgrm8_voltha(e57265d8-8952-11e9-8921-288023a0c20c)" with CreatePodSandboxError: "CreatePodSandbox for pod \"adapter-open-onu-7b86fc69f6-xgrm8_voltha(e57265d8-8952-11e9-8921-288023a0c20c)\" failed: rpc error: code = Unknown desc = failed to set up sandbox container \"d21a5cd97a7c6002adfe22e4c2daab568695583ffe20f10a804ad5a0b0891f47\" network for pod \"adapter-open-onu-7b86fc69f6-xgrm8\": NetworkPlugin cni failed to set up pod \"adapter-open-onu-7b86fc69f6-xgrm8_voltha\" network: fork/exec /opt/cni/bin/loopback: text file busy"
Jun 10 08:31:40 sbat1 kubelet[2901]: odSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[65534],FSGroup:*65534,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:&Affinity{NodeAffinity:&NodeAffinity{RequiredDuringSchedulingIgnoredDuringExecution:nil,PreferredDuringSchedulingIgnoredDuringExecution:[{100 {[{node-role.kubernetes.io/master In []}] []}}],},PodAffinity:nil,PodAntiAffinity:&PodAntiAffinity{RequiredDuringSchedulingIgnoredDuringExecution:[{LabelSelector{MatchLabels:map[string]string{k8s-app: dns-autoscaler,},MatchExpressions:[],} [] kubernetes.io/hostname}],PreferredDuringSchedulingIgnoredDuringExecution:[],},},SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{CriticalAddonsOnly Exists } {node.kubernetes.io/not-ready Exists NoExecute 0xc4207d6f10} {node.kubernetes.io/unreachable Exists NoExecute 0xc4207d6f30}],HostAliases:[],PriorityClassName:system-cluster-critical,Priority:*2000000000,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-20 12:17:37 -0400 EDT } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-03 14:08:50 -0400 EDT ContainersNotReady containers with unready status: [autoscaler]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-10 08:31:35 -0400 EDT ContainersNotReady containers with unready status: [autoscaler]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-20 12:17:37 -0400 EDT }],Message:,Reason:,HostIP:135.25.24.167,PodIP:,StartTime:2019-05-20 12:17:37 -0400 EDT,ContainerStatuses:[{autoscaler {nil nil ContainerStateTerminated{ExitCode:2,Signal:0,Reason:Error,Message:,StartedAt:2019-06-03 14:08:50 -0400 EDT,FinishedAt:2019-06-10 08:26:56 -0400 EDT,ContainerID:docker://2222ae9516d69e2d22c62d110754d8d552b206a6a0f957f200c3436f53efdce7,}} {nil nil nil} false 4 k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0 docker-pullable://k8s.gcr.io/cluster-proportional-autoscaler-amd64
Jun 10 08:31:41 sbat1 kubelet[2901]: etAction{Path:/,Port:8443,Host:,Scheme:HTTPS,HTTPHeaders:[],},TCPSocket:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,} nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*30,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:kubernetes-dashboard,DeprecatedServiceAccount:kubernetes-dashboard,NodeName:sbat1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node-role.kubernetes.io/master NoSchedule } {node.kubernetes.io/not-ready Exists NoExecute 0xc42030c500} {node.kubernetes.io/unreachable Exists NoExecute 0xc42030c520}],HostAliases:[],PriorityClassName:system-cluster-critical,Priority:*2000000000,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-20 12:17:48 -0400 EDT } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-03 14:08:50 -0400 EDT ContainersNotReady containers with unready status: [kubernetes-dashboard]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-10 08:31:35 -0400 EDT ContainersNotReady containers with unready status: [kubernetes-dashboard]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-20 12:17:48 -0400 EDT }],Message:,Reason:,HostIP:135.25.24.167,PodIP:,StartTime:2019-05-20 12:17:48 -0400 EDT,ContainerStatuses:[{kubernetes-dashboard {nil nil ContainerStateTerminated{ExitCode:2,Signal:0,Reason:Error,Message:,StartedAt:2019-06-03 14:08:50 -0400 EDT,FinishedAt:2019-06-10 08:26:56 -0400 EDT,ContainerID:docker://6dedbd542bacbdd77647ececc278a816b0c7468485f0d78a75c7f346d043d364,}} {nil nil nil} false
Jun 10 08:31:41 sbat1 pulseaudio[2379]: [pulseaudio] bluez5-util.c: GetManagedObjects() failed: org.freedesktop.DBus.Error.TimedOut: Failed to activate service 'org.bluez': timed out
Jun 10 08:31:41 sbat1 kubelet[2901]: omountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc4202e4f80} {node.kubernetes.io/unreachable Exists NoExecute 0xc4202e4fa0}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-07 14:01:40 -0400 EDT } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-07 14:01:44 -0400 EDT ContainersNotReady containers with unready status: [adapter-open-onu]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-10 08:31:35 -0400 EDT ContainersNotReady containers with unready status: [adapter-open-onu]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-07 14:34:34 -0400 EDT }],Message:,Reason:,HostIP:135.25.24.167,PodIP:,StartTime:2019-06-07 14:01:40 -0400 EDT,ContainerStatuses:[{adapter-open-onu {nil nil ContainerStateTerminated{ExitCode:137,Signal:0,Reason:Error,Message:,StartedAt:2019-06-07 14:01:43 -0400 EDT,FinishedAt:2019-06-10 08:27:06 -0400 EDT,ContainerID:docker://bb5243dbbd2373645d16891bc5cde22c0ccb55b610e7d6d55ca8f5b9275506d7,}} {nil nil nil} false 0 voltha/voltha-openonu-adapter:2.0.0 docker-pullable://voltha/voltha-openonu-adapter@sha256:1618913e03da03fd8102c164be5fb60a0b882d078512ee1dd58ec11043114596 docker://bb5243dbbd2373645d16891bc5cde22c0ccb55b610e7d6d55ca8f5b9275506d7}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
Jun 10 08:31:41 sbat1 kubelet[2901]: tainerStatuses:[{voltha {nil nil ContainerStateTerminated{ExitCode:137,Signal:0,Reason:Error,Message:,StartedAt:2019-06-07 13:59:00 -0400 EDT,FinishedAt:2019-06-10 08:27:06 -0400 EDT,ContainerID:docker://2fa5410d72ba783900d608f124211cdb718aa2c87da1a82268e0dbfd07d06e9b,}} {nil nil nil} false 3 volthacore/voltha-rw-core:2.0.0 docker-pullable://volthacore/voltha-rw-core@sha256:210be45012cd7088bf67c9a9df4423f7d2bf76db07364a993460f4981b5295e8 docker://2fa5410d72ba783900d608f124211cdb718aa2c87da1a82268e0dbfd07d06e9b}],QOSClass:BestEffort,InitContainerStatuses:[],NominatedNodeName:,},}
Jun 10 08:31:51 sbat1 kubelet[2901]: lse}],RestartPolicy:Always,TerminationGracePeriodSeconds:*30,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:voltha-etcd-operator-etcd-restore-operator,DeprecatedServiceAccount:voltha-etcd-operator-etcd-restore-operator,NodeName:sbat1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node.kubernetes.io/not-ready Exists NoExecute 0xc42055d6f0} {node.kubernetes.io/unreachable Exists NoExecute 0xc42055d710}],HostAliases:[],PriorityClassName:,Priority:*0,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-06-07 13:57:53 -0400 EDT } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-07 13:58:00 -0400 EDT ContainersNotReady containers with unready status: [etcd-restore-operator]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-10 08:31:35 -0400 EDT ContainersNotReady containers with unready status: [etcd-restore-operator]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-06-07 14:30:47 -0400 EDT }],Message:,Reason:,HostIP:135.25.24.167,PodIP:,StartTime:2019-06-07 13:57:53 -0400 EDT,ContainerStatuses:[{etcd-restore-operator {nil nil ContainerStateTerminated{ExitCode:2,Signal:0,Reason:Error,Message:,StartedAt:2019-06-07 13:57:59 -0400 EDT,FinishedAt:2019-06-10 08:26:56 -0400 EDT,ContainerID:docker://398bcbd975288758e3eb240c225bb5dc4affa98359c614f5507baba751dfa9c9,}} {nil nil nil} false 0 quay.io/coreos/etcd-operator:v0.9.3 docker-pullable://quay.io/coreos/etcd-operator@sha256:3633b6d103e9efc2798e4214c8ee6d9b78f262eca65f085d76f5b4aee77e1e95 docker://398bcbd975288758e3eb240c225bb5dc4affa98359c614f5507baba751dfa9c9}],QOSClass:Guaranteed,InitContainerStatuses:
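Note the back-off in the node-lease errors (200ms, 400ms, 800ms, 1.6s, 3.2s), each ending in dial tcp 135.25.24.167:6443: connect: connection refused: nothing was listening on the API server port while the node booted. A quick way to check whether kube-apiserver has come back up, assuming a kubeadm-style control plane running as Docker containers (which matches the Docker Info entry above; container names may differ):

root@sbat1:~# ss -tlnp | grep 6443                         # is anything listening on the apiserver port yet?
root@sbat1:~# docker ps -a | grep kube-apiserver           # state of the apiserver static-pod container
root@sbat1:~# curl -k https://135.25.24.167:6443/healthz   # returns "ok" once the apiserver is healthy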
root@sbat1:~# journalctl | grep fail
Jun 10 08:30:37 sbat1 kernel: acpi PNP0A08:00: _OSC failed (AE_SUPPORT); disabling ASPM
Jun 10 08:30:37 sbat1 kernel: acpi PNP0A08:01: _OSC failed (AE_SUPPORT); disabling ASPM
Jun 10 08:30:37 sbat1 kernel: ata2.01: failed to resume link (SControl 0)
Jun 10 08:30:37 sbat1 kernel: ata1.01: failed to resume link (SControl 0)
Jun 10 08:30:42 sbat1 NetworkManager[1561]: nm_device_get_device_type: assertion 'NM_IS_DEVICE (self)' failed
Jun 10 08:30:42 sbat1 NetworkManager[1561]: [1560169842.3152] failed to enumerate oFono devices: GDBus.Error:org.freedesktop.DBus.Error.ServiceUnknown: The name org.ofono was not provided by any .service files
Jun 10 08:31:11 sbat1 containerd[1879]: time="2019-06-10T08:31:11.195930015-04:00" level=warning msg="failed to load plugin io.containerd.snapshotter.v1.btrfs" error="path /var/lib/containerd/io.containerd.snapshotter.v1.btrfs must be a btrfs filesystem to be used with the btrfs snapshotter"
Jun 10 08:31:11 sbat1 dockerd[1887]: time="2019-06-10T08:31:11.224329234-04:00" level=warning msg="failed to load plugin io.containerd.snapshotter.v1.btrfs" error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.btrfs must be a btrfs filesystem to be used with the btrfs snapshotter"
Jun 10 08:31:11 sbat1 dockerd[1887]: time="2019-06-10T08:31:11.236371340-04:00" level=warning msg="failed to load plugin io.containerd.snapshotter.v1.zfs" error="path /var/lib/docker/containerd/daemon/io.containerd.snapshotter.v1.zfs must be a zfs filesystem to be used with the zfs snapshotter"
Jun 10 08:31:11 sbat1 containerd[1879]: time="2019-06-10T08:31:11.243868899-04:00" level=warning msg="failed to load plugin io.containerd.snapshotter.v1.zfs" error="path /var/lib/containerd/io.containerd.snapshotter.v1.zfs must be a zfs filesystem to be used with the zfs snapshotter"
Jun 10 08:31:25 sbat1 kubelet[2901]: I0610 08:31:25.330474    2901 flags.go:33] FLAG: --fail-swap-on="true"
Jun 10 08:31:25 sbat1 kubelet[2901]: W0610 08:31:25.589644    2901 hostport_manager.go:68] The binary conntrack is not installed, this can cause failures in network connection cleanup.
Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.692257    2901 kubelet.go:1282] Image garbage collection failed once. Stats initialization may not have completed yet: failed to get imageFs info: unable to find data in memory cache
Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.707961    2901 controller.go:115] failed to ensure node lease exists, will retry in 200ms, error: Get https://135.25.24.167:6443/apis/coordination.k8s.io/v1beta1/namespaces/kube-node-lease/leases/sbat1?timeout=10s: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:25 sbat1 kubelet[2901]: W0610 08:31:25.782317    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "dns-autoscaler-56c969bdb8-pwshs_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "24375e42343272aea49049c225d3d591334605425b236480dd19208c758ff37e"
Jun 10 08:31:25 sbat1 kubelet[2901]: W0610 08:31:25.799376    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "dns-autoscaler-56c969bdb8-pwshs_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "3b7bda75087c46318fe88e8f4038ec53d3418708e0bf2236d2cb9165724c9828"
Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.908334    2901 controller.go:115] failed to ensure node lease exists, will retry in 400ms, error: Get https://135.25.24.167:6443/apis/coordination.k8s.io/v1beta1/namespaces/kube-node-lease/leases/sbat1?timeout=10s: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.914509    2901 eviction_manager.go:247] eviction manager: failed to get summary stats: failed to get node info: node "sbat1" not found
Jun 10 08:31:25 sbat1 kubelet[2901]: W0610 08:31:25.926733    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "rw-core2-5c7475dfc5-zhm92_voltha": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "87a5a02004004cbf242409d11be248b4972cf21c79d1a0cc9ba27ec49d21ab60"
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.007579    2901 remote_runtime.go:271] RemoveContainer "d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977": Error response from daemon: removal of container d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977 is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.010421    2901 remote_runtime.go:271] RemoveContainer "d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977": Error response from daemon: removal of container d1e1b5ede45fb856549bbfcadb3dcd2cc03eccf9a2f56cac19cd035ed0fa2977 is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: W0610 08:31:26.101857    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "kubernetes-dashboard-6c7466966c-2lttd_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "7facfb7d60b38fd646a859d72f49782e9c7de35fa70f508a7112defa83989526"
Jun 10 08:31:26 sbat1 kubelet[2901]: W0610 08:31:26.111634    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "kubernetes-dashboard-6c7466966c-2lttd_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "2f5eedf4b48204b919f3ac536de5edb4d410d9281148866087e8731071b1b55f"
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.244976    2901 remote_runtime.go:271] RemoveContainer "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c": Error response from daemon: removal of container cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.245000    2901 kuberuntime_gc.go:142] Failed to remove container "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c": rpc error: code = Unknown desc = failed to remove container "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c": Error response from daemon: removal of container cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: W0610 08:31:26.248441    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "adapter-open-olt-8cf67d66b-msngz_voltha": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "e9b9b0a1fb6f7bbcb8b68eca3b8e22fa2ad0a9601cfaa5709fe242ed825c5c56"
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.297311    2901 remote_runtime.go:321] ContainerStatus "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c" from runtime service failed: rpc error: code = Unknown desc = Error: No such container: cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.308693    2901 controller.go:115] failed to ensure node lease exists, will retry in 800ms, error: Get https://135.25.24.167:6443/apis/coordination.k8s.io/v1beta1/namespaces/kube-node-lease/leases/sbat1?timeout=10s: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:26 sbat1 kubelet[2901]: W0610 08:31:26.367689    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "voltha-etcd-cluster-86qr8rqjd5_voltha": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "0e913ddde7e23d3555c88663ed47d7e43d0171dc1eff4a0dcb182a9363d5bf83"
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.405479    2901 remote_runtime.go:321] ContainerStatus "e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac" from runtime service failed: rpc error: code = Unknown desc = Error: No such container: e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.405878    2901 remote_runtime.go:321] ContainerStatus "d35b3bfee9de3f7222b6e136e754fe11f6aa87a1343f646a4fa0edd1ed9e7531" from runtime service failed: rpc error: code = Unknown desc = Error: No such container: d35b3bfee9de3f7222b6e136e754fe11f6aa87a1343f646a4fa0edd1ed9e7531
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.409141    2901 remote_runtime.go:271] RemoveContainer "c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424": Error response from daemon: removal of container c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424 is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.411959    2901 remote_runtime.go:271] RemoveContainer "c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424": Error response from daemon: removal of container c3bd65f621400820882e54bcaaea0059ead4d9de4d7bfceb4747286e72125424 is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: W0610 08:31:26.482514    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "voltha-etcd-operator-etcd-restore-operator-6777b84b45-qbbwr_voltha": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "fb3b67a43bbffd3cac22666ef8a44f1df162b21bcaa66bdac8d790db17c994e9"
Jun 10 08:31:26 sbat1 kubelet[2901]: W0610 08:31:26.540730    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "adapter-open-onu-7b86fc69f6-xgrm8_voltha": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "786470e5e4b10cbfc165848e5b0104720ef5dfb873720cb907c27307f5b7f0f9"
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.561944    2901 remote_runtime.go:271] RemoveContainer "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e": Error response from daemon: removal of container ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.561976    2901 kuberuntime_gc.go:142] Failed to remove container "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e": rpc error: code = Unknown desc = failed to remove container "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e": Error response from daemon: removal of container ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.614628    2901 remote_runtime.go:321] ContainerStatus "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e" from runtime service failed: rpc error: code = Unknown desc = Error: No such container: ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.797763    2901 remote_runtime.go:271] RemoveContainer "a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff": Error response from daemon: removal of container a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.800602    2901 remote_runtime.go:271] RemoveContainer "a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff" from runtime service failed: rpc error: code = Unknown desc = failed to remove container "a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff": Error response from daemon: removal of container a2dbcee03c91891f2ce37f825dcf6fb766d2a50376e40eb4901eb2e11e835eff is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.802025    2901 kuberuntime_gc.go:142] Failed to remove container "e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac": failed to get container status "e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac": rpc error: code = Unknown desc = Error: No such container: e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac
Jun 10 08:31:26 sbat1 kubelet[2901]: W0610 08:31:26.826413    2901 cni.go:309] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "3b7bda75087c46318fe88e8f4038ec53d3418708e0bf2236d2cb9165724c9828"
Jun 10 08:31:27 sbat1 kubelet[2901]: E0610 08:31:27.109239    2901 controller.go:115] failed to ensure node lease exists, will retry in 1.6s, error: Get https://135.25.24.167:6443/apis/coordination.k8s.io/v1beta1/namespaces/kube-node-lease/leases/sbat1?timeout=10s: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:27 sbat1 kubelet[2901]: W0610 08:31:27.915639    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "rw-core2-5c7475dfc5-zhm92_voltha": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "87a5a02004004cbf242409d11be248b4972cf21c79d1a0cc9ba27ec49d21ab60"
Jun 10 08:31:27 sbat1 kubelet[2901]: W0610 08:31:27.922229    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "kubernetes-dashboard-6c7466966c-2lttd_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "7facfb7d60b38fd646a859d72f49782e9c7de35fa70f508a7112defa83989526"
Jun 10 08:31:27 sbat1 kubelet[2901]: W0610 08:31:27.923626    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "kubernetes-dashboard-6c7466966c-2lttd_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "2f5eedf4b48204b919f3ac536de5edb4d410d9281148866087e8731071b1b55f"
Jun 10 08:31:28 sbat1 kubelet[2901]: E0610 08:31:28.709694    2901 controller.go:115] failed to ensure node lease exists, will retry in 3.2s, error: Get https://135.25.24.167:6443/apis/coordination.k8s.io/v1beta1/namespaces/kube-node-lease/leases/sbat1?timeout=10s: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:28 sbat1 kubelet[2901]: W0610 08:31:28.877040    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "dns-autoscaler-56c969bdb8-pwshs_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "24375e42343272aea49049c225d3d591334605425b236480dd19208c758ff37e"
Jun 10 08:31:29 sbat1 kubelet[2901]: W0610 08:31:29.762881    2901 cni.go:309] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "2f5eedf4b48204b919f3ac536de5edb4d410d9281148866087e8731071b1b55f"
Jun 10 08:31:29 sbat1 kubelet[2901]: W0610 08:31:29.921831    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "dns-autoscaler-56c969bdb8-pwshs_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "24375e42343272aea49049c225d3d591334605425b236480dd19208c758ff37e"
Jun 10 08:31:30 sbat1 kubelet[2901]: W0610 08:31:30.977113    2901 docker_sandbox.go:384] failed to read pod IP from plugin/docker: NetworkPlugin cni failed on the status hook for pod "kubernetes-dashboard-6c7466966c-2lttd_kube-system": CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "7facfb7d60b38fd646a859d72f49782e9c7de35fa70f508a7112defa83989526"
Jun 10 08:31:35 sbat1 kubelet[2901]: W0610 08:31:35.363735    2901 cni.go:309] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "24375e42343272aea49049c225d3d591334605425b236480dd19208c758ff37e"
Jun 10 08:31:36 sbat1 kubelet[2901]: W0610 08:31:36.292396    2901 cni.go:309] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "7facfb7d60b38fd646a859d72f49782e9c7de35fa70f508a7112defa83989526"
Jun 10 08:31:36 sbat1 kubelet[2901]: W0610 08:31:36.308578    2901 cni.go:309] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "fb3b67a43bbffd3cac22666ef8a44f1df162b21bcaa66bdac8d790db17c994e9"
Jun 10 08:31:36 sbat1 kubelet[2901]: E0610 08:31:36.366465    2901 nestedpendingoperations.go:267] Operation for "\"kubernetes.io/secret/e57265d8-8952-11e9-8921-288023a0c20c-default-token-dsb6t\" (\"e57265d8-8952-11e9-8921-288023a0c20c\")" failed. No retries permitted until 2019-06-10 08:31:36.866432348 -0400 EDT m=+13.085894107 (durationBeforeRetry 500ms). Error: "MountVolume.SetUp failed for volume \"default-token-dsb6t\" (UniqueName: \"kubernetes.io/secret/e57265d8-8952-11e9-8921-288023a0c20c-default-token-dsb6t\") pod \"adapter-open-onu-7b86fc69f6-xgrm8\" (UID: \"e57265d8-8952-11e9-8921-288023a0c20c\") : couldn't propagate object cache: timed out waiting for the condition"
Jun 10 08:31:36 sbat1 kubelet[2901]: E0610 08:31:36.366504    2901 nestedpendingoperations.go:267] Operation for "\"kubernetes.io/secret/cf370f04-8952-11e9-8921-288023a0c20c-default-token-dsb6t\" (\"cf370f04-8952-11e9-8921-288023a0c20c\")" failed. No retries permitted until 2019-06-10 08:31:36.866479979 -0400 EDT m=+13.085941741 (durationBeforeRetry 500ms). Error: "MountVolume.SetUp failed for volume \"default-token-dsb6t\" (UniqueName: \"kubernetes.io/secret/cf370f04-8952-11e9-8921-288023a0c20c-default-token-dsb6t\") pod \"adapter-open-olt-8cf67d66b-msngz\" (UID: \"cf370f04-8952-11e9-8921-288023a0c20c\") : couldn't propagate object cache: timed out waiting for the condition"
Jun 10 08:31:36 sbat1 kubelet[2901]: W0610 08:31:36.625393    2901 cni.go:309] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "87a5a02004004cbf242409d11be248b4972cf21c79d1a0cc9ba27ec49d21ab60"
Jun 10 08:31:36 sbat1 kubelet[2901]: W0610 08:31:36.953186    2901 cni.go:309] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "e9b9b0a1fb6f7bbcb8b68eca3b8e22fa2ad0a9601cfaa5709fe242ed825c5c56"
Jun 10 08:31:36 sbat1 kubelet[2901]: W0610 08:31:36.978879    2901 cni.go:309] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "786470e5e4b10cbfc165848e5b0104720ef5dfb873720cb907c27307f5b7f0f9"
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.176823    2901 cni.go:331] Error adding kube-system_dns-autoscaler-56c969bdb8-pwshs/307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad to network calico/cni0: failed to find plugin "calico" in path [/opt/cni/bin]
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.196547    2901 cni.go:352] Error deleting kube-system_dns-autoscaler-56c969bdb8-pwshs/307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad from network calico/cni0: failed to find plugin "calico" in path [/opt/cni/bin]
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.331394    2901 remote_runtime.go:132] StopPodSandbox "fb3b67a43bbffd3cac22666ef8a44f1df162b21bcaa66bdac8d790db17c994e9" from runtime service failed: rpc error: code = Unknown desc = NetworkPlugin cni failed to teardown pod "voltha-etcd-operator-etcd-restore-operator-6777b84b45-qbbwr_voltha" network: fork/exec /opt/cni/bin/calico: text file busy
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.331490    2901 kuberuntime_manager.go:641] killPodWithSyncResult failed: failed to "KillPodSandbox" for "5e38b391-8952-11e9-8921-288023a0c20c" with KillPodSandboxError: "rpc error: code = Unknown desc = NetworkPlugin cni failed to teardown pod \"voltha-etcd-operator-etcd-restore-operator-6777b84b45-qbbwr_voltha\" network: fork/exec /opt/cni/bin/calico: text file busy"
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.331557    2901 pod_workers.go:190] Error syncing pod 5e38b391-8952-11e9-8921-288023a0c20c ("voltha-etcd-operator-etcd-restore-operator-6777b84b45-qbbwr_voltha(5e38b391-8952-11e9-8921-288023a0c20c)"), skipping: failed to "KillPodSandbox" for "5e38b391-8952-11e9-8921-288023a0c20c" with KillPodSandboxError: "rpc error: code = Unknown desc = NetworkPlugin cni failed to teardown pod \"voltha-etcd-operator-etcd-restore-operator-6777b84b45-qbbwr_voltha\" network: fork/exec /opt/cni/bin/calico: text file busy"
Jun 10 08:31:38 sbat1 kubelet[2901]: E0610 08:31:38.031593    2901 remote_runtime.go:109] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = [failed to set up sandbox container "307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad" network for pod "dns-autoscaler-56c969bdb8-pwshs": NetworkPlugin cni failed to set up pod "dns-autoscaler-56c969bdb8-pwshs_kube-system" network: failed to find plugin "calico" in path [/opt/cni/bin], failed to clean up sandbox container
"307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad" network for pod "dns-autoscaler-56c969bdb8-pwshs": NetworkPlugin cni failed to teardown pod "dns-autoscaler-56c969bdb8-pwshs_kube-system" network: failed to find plugin "calico" in path [/opt/cni/bin]] Jun 10 08:31:38 sbat1 kubelet[2901]: E0610 08:31:38.031658 2901 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "dns-autoscaler-56c969bdb8-pwshs_kube-system(c87d4f3f-7b1a-11e9-a372-40a8f029cae8)" failed: rpc error: code = Unknown desc = [failed to set up sandbox container "307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad" network for pod "dns-autoscaler-56c969bdb8-pwshs": NetworkPlugin cni failed to set up pod "dns-autoscaler-56c969bdb8-pwshs_kube-system" network: failed to find plugin "calico" in path [/opt/cni/bin], failed to clean up sandbox container "307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad" network for pod "dns-autoscaler-56c969bdb8-pwshs": NetworkPlugin cni failed to teardown pod "dns-autoscaler-56c969bdb8-pwshs_kube-system" network: failed to find plugin "calico" in path [/opt/cni/bin]] Jun 10 08:31:38 sbat1 kubelet[2901]: E0610 08:31:38.031682 2901 kuberuntime_manager.go:693] createPodSandbox for pod "dns-autoscaler-56c969bdb8-pwshs_kube-system(c87d4f3f-7b1a-11e9-a372-40a8f029cae8)" failed: rpc error: code = Unknown desc = [failed to set up sandbox container "307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad" network for pod "dns-autoscaler-56c969bdb8-pwshs": NetworkPlugin cni failed to set up pod "dns-autoscaler-56c969bdb8-pwshs_kube-system" network: failed to find plugin "calico" in path [/opt/cni/bin], failed to clean up sandbox container "307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad" network for pod "dns-autoscaler-56c969bdb8-pwshs": NetworkPlugin cni failed to teardown pod "dns-autoscaler-56c969bdb8-pwshs_kube-system" network: failed to find plugin "calico" in path [/opt/cni/bin]] Jun 10 08:31:38 sbat1 kubelet[2901]: E0610 08:31:38.031766 2901 pod_workers.go:190] Error syncing pod c87d4f3f-7b1a-11e9-a372-40a8f029cae8 ("dns-autoscaler-56c969bdb8-pwshs_kube-system(c87d4f3f-7b1a-11e9-a372-40a8f029cae8)"), skipping: failed to "CreatePodSandbox" for "dns-autoscaler-56c969bdb8-pwshs_kube-system(c87d4f3f-7b1a-11e9-a372-40a8f029cae8)" with CreatePodSandboxError: "CreatePodSandbox for pod \"dns-autoscaler-56c969bdb8-pwshs_kube-system(c87d4f3f-7b1a-11e9-a372-40a8f029cae8)\" failed: rpc error: code = Unknown desc = [failed to set up sandbox container \"307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad\" network for pod \"dns-autoscaler-56c969bdb8-pwshs\": NetworkPlugin cni failed to set up pod \"dns-autoscaler-56c969bdb8-pwshs_kube-system\" network: failed to find plugin \"calico\" in path [/opt/cni/bin], failed to clean up sandbox container \"307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad\" network for pod \"dns-autoscaler-56c969bdb8-pwshs\": NetworkPlugin cni failed to teardown pod \"dns-autoscaler-56c969bdb8-pwshs_kube-system\" network: failed to find plugin \"calico\" in path [/opt/cni/bin]]" Jun 10 08:31:38 sbat1 NetworkManager[1561]: nm_device_get_device_type: assertion 'NM_IS_DEVICE (self)' failed Jun 10 08:31:38 sbat1 NetworkManager[1561]: nm_device_get_device_type: assertion 'NM_IS_DEVICE (self)' failed Jun 10 08:31:39 sbat1 kubelet[2901]: W0610 08:31:39.112106 2901 cni.go:309] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container 
"307bc40756d44122d6dfa4aca24bcdda523c5766fa5f207c4a8df8b472ab3aad" Jun 10 08:31:39 sbat1 kubelet[2901]: E0610 08:31:39.423934 2901 remote_runtime.go:109] RunPodSandbox from runtime service failed: rpc error: code = Unknown desc = failed to set up sandbox container "d21a5cd97a7c6002adfe22e4c2daab568695583ffe20f10a804ad5a0b0891f47" network for pod "adapter-open-onu-7b86fc69f6-xgrm8": NetworkPlugin cni failed to set up pod "adapter-open-onu-7b86fc69f6-xgrm8_voltha" network: fork/exec /opt/cni/bin/loopback: text file busy Jun 10 08:31:39 sbat1 kubelet[2901]: E0610 08:31:39.423988 2901 kuberuntime_sandbox.go:68] CreatePodSandbox for pod "adapter-open-onu-7b86fc69f6-xgrm8_voltha(e57265d8-8952-11e9-8921-288023a0c20c)" failed: rpc error: code = Unknown desc = failed to set up sandbox container "d21a5cd97a7c6002adfe22e4c2daab568695583ffe20f10a804ad5a0b0891f47" network for pod "adapter-open-onu-7b86fc69f6-xgrm8": NetworkPlugin cni failed to set up pod "adapter-open-onu-7b86fc69f6-xgrm8_voltha" network: fork/exec /opt/cni/bin/loopback: text file busy Jun 10 08:31:39 sbat1 kubelet[2901]: E0610 08:31:39.424009 2901 kuberuntime_manager.go:693] createPodSandbox for pod "adapter-open-onu-7b86fc69f6-xgrm8_voltha(e57265d8-8952-11e9-8921-288023a0c20c)" failed: rpc error: code = Unknown desc = failed to set up sandbox container "d21a5cd97a7c6002adfe22e4c2daab568695583ffe20f10a804ad5a0b0891f47" network for pod "adapter-open-onu-7b86fc69f6-xgrm8": NetworkPlugin cni failed to set up pod "adapter-open-onu-7b86fc69f6-xgrm8_voltha" network: fork/exec /opt/cni/bin/loopback: text file busy Jun 10 08:31:39 sbat1 kubelet[2901]: E0610 08:31:39.424075 2901 pod_workers.go:190] Error syncing pod e57265d8-8952-11e9-8921-288023a0c20c ("adapter-open-onu-7b86fc69f6-xgrm8_voltha(e57265d8-8952-11e9-8921-288023a0c20c)"), skipping: failed to "CreatePodSandbox" for "adapter-open-onu-7b86fc69f6-xgrm8_voltha(e57265d8-8952-11e9-8921-288023a0c20c)" with CreatePodSandboxError: "CreatePodSandbox for pod \"adapter-open-onu-7b86fc69f6-xgrm8_voltha(e57265d8-8952-11e9-8921-288023a0c20c)\" failed: rpc error: code = Unknown desc = failed to set up sandbox container \"d21a5cd97a7c6002adfe22e4c2daab568695583ffe20f10a804ad5a0b0891f47\" network for pod \"adapter-open-onu-7b86fc69f6-xgrm8\": NetworkPlugin cni failed to set up pod \"adapter-open-onu-7b86fc69f6-xgrm8_voltha\" network: fork/exec /opt/cni/bin/loopback: text file busy" Jun 10 08:31:39 sbat1 NetworkManager[1561]: nm_device_get_device_type: assertion 'NM_IS_DEVICE (self)' failed Jun 10 08:31:40 sbat1 kubelet[2901]: W0610 08:31:40.170531 2901 cni.go:309] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "d21a5cd97a7c6002adfe22e4c2daab568695583ffe20f10a804ad5a0b0891f47" Jun 10 08:31:40 sbat1 NetworkManager[1561]: [1560169900.3313] device (cali7a243c3cc41): failed to find device 12 'cali7a243c3cc41' with udev Jun 10 08:31:40 sbat1 NetworkManager[1561]: [1560169900.4289] device (calia30c811d212): failed to find device 13 'calia30c811d212' with udev Jun 10 08:31:41 sbat1 pulseaudio[2379]: [pulseaudio] bluez5-util.c: GetManagedObjects() failed: org.freedesktop.DBus.Error.TimedOut: Failed to activate service 'org.bluez': timed out Jun 10 08:31:41 sbat1 NetworkManager[1561]: [1560169901.4992] device (cali112fb6a3678): failed to find device 14 'cali112fb6a3678' with udev Jun 10 08:31:41 sbat1 NetworkManager[1561]: [1560169901.7071] device (cali4fbe94b7ad7): failed to find device 15 'cali4fbe94b7ad7' with udev Jun 10 08:31:41 sbat1 
NetworkManager[1561]: [1560169901.7881] device (cali3b1b02278da): failed to find device 16 'cali3b1b02278da' with udev Jun 10 08:31:42 sbat1 kubelet[2901]: I0610 08:31:42.957377 2901 prober.go:112] Readiness probe for "calico-node-p5sfv_kube-system(a424e645-7b1a-11e9-a372-40a8f029cae8):calico-node" failed (failure): calico/node is not ready: felix is not ready: Get http://localhost:9099/readiness: dial tcp 127.0.0.1:9099: connect: connection refused Jun 10 08:31:50 sbat1 kubelet[2901]: W0610 08:31:50.704547 2901 cni.go:309] CNI failed to retrieve network namespace path: cannot find network namespace for the terminated container "fb3b67a43bbffd3cac22666ef8a44f1df162b21bcaa66bdac8d790db17c994e9" Jun 10 08:31:51 sbat1 etcd[2996]: 2019-06-10 12:31:51.362277 W | etcdserver: request "header: txn: success:> failure:<>>" with result "size:20" took too long (101.165342ms) to execute Jun 10 08:31:51 sbat1 NetworkManager[1561]: [1560169911.9153] device (cali01576c34e18): failed to find device 18 'cali01576c34e18' with udev root@sbat1:~# journalctl | grep Fail Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.550483 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/kubelet.go:442: Failed to list *v1.Service: Get https://135.25.24.167:6443/api/v1/services?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.550482 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/kubelet.go:451: Failed to list *v1.Node: Get https://135.25.24.167:6443/api/v1/nodes?fieldSelector=metadata.name%3Dsbat1&limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.550511 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:47: Failed to list *v1.Pod: Get https://135.25.24.167:6443/api/v1/pods?fieldSelector=spec.nodeName%3Dsbat1&limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.692284 2901 reflector.go:126] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.CSIDriver: Get https://135.25.24.167:6443/apis/storage.k8s.io/v1beta1/csidrivers?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.717204 2901 reflector.go:126] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.RuntimeClass: Get https://135.25.24.167:6443/apis/node.k8s.io/v1beta1/runtimeclasses?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.245000 2901 kuberuntime_gc.go:142] Failed to remove container "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c": rpc error: code = Unknown desc = failed to remove container "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c": Error response from daemon: removal of container cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c is already in progress Jun 10 08:31:26 sbat1 kubelet[2901]: W0610 08:31:26.487595 2901 status_manager.go:485] Failed to get status for pod "kube-controller-manager-sbat1_kube-system(7f073f87ac2124e5401cdd1c8afcbe92)": Get https://135.25.24.167:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-sbat1: dial tcp 135.25.24.167:6443: connect: connection refused Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.550871 2901 reflector.go:126] 
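
Two failure modes dominate the grep above: "failed to find plugin \"calico\" in path [/opt/cni/bin]" and "fork/exec /opt/cni/bin/calico: text file busy" (likewise for /opt/cni/bin/loopback). Both suggest the CNI binaries under /opt/cni/bin were missing, or still being rewritten by calico-node's install step right after boot, at the moment the kubelet tried to exec them. A few checks one might run on the node (a sketch; the k8s-app=calico-node label is the usual Calico daemonset label and may differ per install, and kubectl only works once the apiserver answers):

    # Are the CNI plugin binaries present and executable?
    ls -l /opt/cni/bin/calico /opt/cni/bin/loopback
    # Which process still holds a binary open (would explain "text file busy")?
    lsof /opt/cni/bin/calico
    # Is calico-node running yet? Its readiness probe was still failing at 08:31:42.
    kubectl -n kube-system get pods -l k8s-app=calico-node -o wide
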
root@sbat1:~# journalctl | grep Fail
Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.550483 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/kubelet.go:442: Failed to list *v1.Service: Get https://135.25.24.167:6443/api/v1/services?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.550482 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/kubelet.go:451: Failed to list *v1.Node: Get https://135.25.24.167:6443/api/v1/nodes?fieldSelector=metadata.name%3Dsbat1&limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.550511 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:47: Failed to list *v1.Pod: Get https://135.25.24.167:6443/api/v1/pods?fieldSelector=spec.nodeName%3Dsbat1&limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.692284 2901 reflector.go:126] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.CSIDriver: Get https://135.25.24.167:6443/apis/storage.k8s.io/v1beta1/csidrivers?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:25 sbat1 kubelet[2901]: E0610 08:31:25.717204 2901 reflector.go:126] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.RuntimeClass: Get https://135.25.24.167:6443/apis/node.k8s.io/v1beta1/runtimeclasses?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.245000 2901 kuberuntime_gc.go:142] Failed to remove container "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c": rpc error: code = Unknown desc = failed to remove container "cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c": Error response from daemon: removal of container cad94df48a80c430a3d928d964d68be357b7350d3fcea99c71dd1610a3b2199c is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: W0610 08:31:26.487595 2901 status_manager.go:485] Failed to get status for pod "kube-controller-manager-sbat1_kube-system(7f073f87ac2124e5401cdd1c8afcbe92)": Get https://135.25.24.167:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-sbat1: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.550871 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/kubelet.go:442: Failed to list *v1.Service: Get https://135.25.24.167:6443/api/v1/services?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.552001 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/kubelet.go:451: Failed to list *v1.Node: Get https://135.25.24.167:6443/api/v1/nodes?fieldSelector=metadata.name%3Dsbat1&limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.553056 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:47: Failed to list *v1.Pod: Get https://135.25.24.167:6443/api/v1/pods?fieldSelector=spec.nodeName%3Dsbat1&limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.561976 2901 kuberuntime_gc.go:142] Failed to remove container "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e": rpc error: code = Unknown desc = failed to remove container "ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e": Error response from daemon: removal of container ade3c95ea1ccdc1a9b974ba431b283b762d47c83b103d4e362c4f9e2bbb28c4e is already in progress
Jun 10 08:31:26 sbat1 kubelet[2901]: W0610 08:31:26.640310 2901 status_manager.go:485] Failed to get status for pod "kube-scheduler-sbat1_kube-system(0b98a5a6bae72e2da6e385144df5de45)": Get https://135.25.24.167:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-sbat1: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.692659 2901 reflector.go:126] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.CSIDriver: Get https://135.25.24.167:6443/apis/storage.k8s.io/v1beta1/csidrivers?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.717547 2901 reflector.go:126] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.RuntimeClass: Get https://135.25.24.167:6443/apis/node.k8s.io/v1beta1/runtimeclasses?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:26 sbat1 kubelet[2901]: W0610 08:31:26.795065 2901 status_manager.go:485] Failed to get status for pod "kube-apiserver-sbat1_kube-system(1108e76d851a80e2afe895df2cacf268)": Get https://135.25.24.167:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-sbat1: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:26 sbat1 kubelet[2901]: E0610 08:31:26.802025 2901 kuberuntime_gc.go:142] Failed to remove container "e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac": failed to get container status "e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac": rpc error: code = Unknown desc = Error: No such container: e69db9dd2df80ad45a29d4831b312995c757fab17889250c7fd5b8dd380b6dac
Jun 10 08:31:27 sbat1 kubelet[2901]: E0610 08:31:27.551482 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/kubelet.go:442: Failed to list *v1.Service: Get https://135.25.24.167:6443/api/v1/services?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:27 sbat1 kubelet[2901]: E0610 08:31:27.552357 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/kubelet.go:451: Failed to list *v1.Node: Get https://135.25.24.167:6443/api/v1/nodes?fieldSelector=metadata.name%3Dsbat1&limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:27 sbat1 kubelet[2901]: E0610 08:31:27.553457 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:47: Failed to list *v1.Pod: Get https://135.25.24.167:6443/api/v1/pods?fieldSelector=spec.nodeName%3Dsbat1&limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:27 sbat1 kubelet[2901]: E0610 08:31:27.747288 2901 reflector.go:126] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.CSIDriver: Get https://135.25.24.167:6443/apis/storage.k8s.io/v1beta1/csidrivers?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:27 sbat1 kubelet[2901]: E0610 08:31:27.947275 2901 reflector.go:126] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.RuntimeClass: Get https://135.25.24.167:6443/apis/node.k8s.io/v1beta1/runtimeclasses?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:28 sbat1 kubelet[2901]: W0610 08:31:28.391744 2901 status_manager.go:485] Failed to get status for pod "kube-controller-manager-sbat1_kube-system(7f073f87ac2124e5401cdd1c8afcbe92)": Get https://135.25.24.167:6443/api/v1/namespaces/kube-system/pods/kube-controller-manager-sbat1: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:28 sbat1 kubelet[2901]: W0610 08:31:28.398809 2901 status_manager.go:485] Failed to get status for pod "kube-scheduler-sbat1_kube-system(0b98a5a6bae72e2da6e385144df5de45)": Get https://135.25.24.167:6443/api/v1/namespaces/kube-system/pods/kube-scheduler-sbat1: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:28 sbat1 kubelet[2901]: E0610 08:31:28.551873 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/kubelet.go:442: Failed to list *v1.Service: Get https://135.25.24.167:6443/api/v1/services?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:28 sbat1 kubelet[2901]: E0610 08:31:28.747158 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/kubelet.go:451: Failed to list *v1.Node: Get https://135.25.24.167:6443/api/v1/nodes?fieldSelector=metadata.name%3Dsbat1&limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:28 sbat1 kubelet[2901]: E0610 08:31:28.947259 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:47: Failed to list *v1.Pod: Get https://135.25.24.167:6443/api/v1/pods?fieldSelector=spec.nodeName%3Dsbat1&limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:29 sbat1 kubelet[2901]: E0610 08:31:29.147198 2901 reflector.go:126] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.CSIDriver: Get https://135.25.24.167:6443/apis/storage.k8s.io/v1beta1/csidrivers?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:29 sbat1 kubelet[2901]: E0610 08:31:29.547196 2901 reflector.go:126] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.RuntimeClass: Get https://135.25.24.167:6443/apis/node.k8s.io/v1beta1/runtimeclasses?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:29 sbat1 kubelet[2901]: E0610 08:31:29.747168 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/kubelet.go:442: Failed to list *v1.Service: Get https://135.25.24.167:6443/api/v1/services?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:29 sbat1 kubelet[2901]: E0610 08:31:29.947206 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/kubelet.go:451: Failed to list *v1.Node: Get https://135.25.24.167:6443/api/v1/nodes?fieldSelector=metadata.name%3Dsbat1&limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:30 sbat1 kubelet[2901]: W0610 08:31:30.147233 2901 status_manager.go:485] Failed to get status for pod "kube-apiserver-sbat1_kube-system(1108e76d851a80e2afe895df2cacf268)": Get https://135.25.24.167:6443/api/v1/namespaces/kube-system/pods/kube-apiserver-sbat1: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:30 sbat1 kubelet[2901]: E0610 08:31:30.347263 2901 reflector.go:126] k8s.io/kubernetes/pkg/kubelet/config/apiserver.go:47: Failed to list *v1.Pod: Get https://135.25.24.167:6443/api/v1/pods?fieldSelector=spec.nodeName%3Dsbat1&limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:30 sbat1 kubelet[2901]: E0610 08:31:30.547211 2901 reflector.go:126] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.CSIDriver: Get https://135.25.24.167:6443/apis/storage.k8s.io/v1beta1/csidrivers?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:30 sbat1 kubelet[2901]: E0610 08:31:30.747223 2901 reflector.go:126] k8s.io/client-go/informers/factory.go:133: Failed to list *v1beta1.RuntimeClass: Get https://135.25.24.167:6443/apis/node.k8s.io/v1beta1/runtimeclasses?limit=500&resourceVersion=0: dial tcp 135.25.24.167:6443: connect: connection refused
Jun 10 08:31:36 sbat1 kubelet[2901]: W0610 08:31:36.395149 2901 container.go:409] Failed to create summary reader for "/system.slice/run-r039f25383cdd44958f0afb104c9167b5.scope": none of the resources are being tracked.
Jun 10 08:31:36 sbat1 kubelet[2901]: W0610 08:31:36.397360 2901 container.go:409] Failed to create summary reader for "/system.slice/run-r635c4b958daa421291f6b748ab12a75e.scope": none of the resources are being tracked.
Jun 10 08:31:37 sbat1 kubelet[2901]: E0610 08:31:37.331441 2901 kuberuntime_manager.go:846] Failed to stop sandbox {"docker" "fb3b67a43bbffd3cac22666ef8a44f1df162b21bcaa66bdac8d790db17c994e9"}
Jun 10 08:31:40 sbat1 kubelet[2901]: 2019-06-10 08:31:40.247 [INFO][6050] k8s.go 790: pod info &Pod{ObjectMeta:k8s_io_apimachinery_pkg_apis_meta_v1.ObjectMeta{Name:dns-autoscaler-56c969bdb8-pwshs,GenerateName:dns-autoscaler-56c969bdb8-,Namespace:kube-system,SelfLink:/api/v1/namespaces/kube-system/pods/dns-autoscaler-56c969bdb8-pwshs,UID:c87d4f3f-7b1a-11e9-a372-40a8f029cae8,ResourceVersion:4016642,Generation:0,CreationTimestamp:2019-05-20 12:17:37 -0400 EDT,DeletionTimestamp:,DeletionGracePeriodSeconds:nil,Labels:map[string]string{k8s-app: dns-autoscaler,pod-template-hash: 56c969bdb8,},Annotations:map[string]string{scheduler.alpha.kubernetes.io/critical-pod: ,seccomp.security.alpha.kubernetes.io/pod: docker/default,},OwnerReferences:[{apps/v1 ReplicaSet dns-autoscaler-56c969bdb8 c864707c-7b1a-11e9-a372-40a8f029cae8 0xc4207d6d3a 0xc4207d6d3b}],Finalizers:[],ClusterName:,Initializers:nil,},Spec:PodSpec{Volumes:[{dns-autoscaler-token-tfbkd {nil nil nil nil nil SecretVolumeSource{SecretName:dns-autoscaler-token-tfbkd,Items:[],DefaultMode:*420,Optional:nil,} nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil nil}}],Containers:[{autoscaler k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.4.0 [/cluster-proportional-autoscaler --namespace=kube-system --default-params={"linear":{"preventSinglePointFailure":true,"coresPerReplica":256,"nodesPerReplica":16,"min":2}} --logtostderr=true --v=2 --configmap=dns-autoscaler --target=Deployment/coredns] [] [] [] [] {map[] map[cpu:{{20 -3} {} 20m DecimalSI} memory:{{10485760 0} {} 10Mi BinarySI}]} [{dns-autoscaler-token-tfbkd true /var/run/secrets/kubernetes.io/serviceaccount }] [] nil nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*30,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{beta.kubernetes.io/os: linux,},ServiceAccountName:dns-autoscaler,DeprecatedServiceAccount:dns-autoscaler,NodeName:sbat1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&P
Jun 10 08:31:41 sbat1 kubelet[2901]: etAction{Path:/,Port:8443,Host:,Scheme:HTTPS,HTTPHeaders:[],},TCPSocket:nil,},InitialDelaySeconds:0,TimeoutSeconds:1,PeriodSeconds:10,SuccessThreshold:1,FailureThreshold:3,} nil nil /dev/termination-log File IfNotPresent nil false false false}],RestartPolicy:Always,TerminationGracePeriodSeconds:*30,ActiveDeadlineSeconds:nil,DNSPolicy:ClusterFirst,NodeSelector:map[string]string{},ServiceAccountName:kubernetes-dashboard,DeprecatedServiceAccount:kubernetes-dashboard,NodeName:sbat1,HostNetwork:false,HostPID:false,HostIPC:false,SecurityContext:&PodSecurityContext{SELinuxOptions:nil,RunAsUser:nil,RunAsNonRoot:nil,SupplementalGroups:[],FSGroup:nil,RunAsGroup:nil,Sysctls:[],},ImagePullSecrets:[],Hostname:,Subdomain:,Affinity:nil,SchedulerName:default-scheduler,InitContainers:[],AutomountServiceAccountToken:nil,Tolerations:[{node-role.kubernetes.io/master NoSchedule } {node.kubernetes.io/not-ready Exists NoExecute 0xc42030c500} {node.kubernetes.io/unreachable Exists NoExecute 0xc42030c520}],HostAliases:[],PriorityClassName:system-cluster-critical,Priority:*2000000000,DNSConfig:nil,ShareProcessNamespace:nil,ReadinessGates:[],},Status:PodStatus{Phase:Running,Conditions:[{Initialized True 0001-01-01 00:00:00 +0000 UTC 2019-05-20 12:17:48 -0400 EDT } {Ready False 0001-01-01 00:00:00 +0000 UTC 2019-06-03 14:08:50 -0400 EDT ContainersNotReady containers with unready status: [kubernetes-dashboard]} {ContainersReady False 0001-01-01 00:00:00 +0000 UTC 2019-06-10 08:31:35 -0400 EDT ContainersNotReady containers with unready status: [kubernetes-dashboard]} {PodScheduled True 0001-01-01 00:00:00 +0000 UTC 2019-05-20 12:17:48 -0400 EDT }],Message:,Reason:,HostIP:135.25.24.167,PodIP:,StartTime:2019-05-20 12:17:48 -0400 EDT,ContainerStatuses:[{kubernetes-dashboard {nil nil ContainerStateTerminated{ExitCode:2,Signal:0,Reason:Error,Message:,StartedAt:2019-06-03 14:08:50 -0400 EDT,FinishedAt:2019-06-10 08:26:56 -0400 EDT,ContainerID:docker://6dedbd542bacbdd77647ececc278a816b0c7468485f0d78a75c7f346d043d364,}} {nil nil nil} false
Jun 10 08:31:41 sbat1 dbus[1522]: [system] Failed to activate service 'org.bluez': timed out
Jun 10 08:31:41 sbat1 pulseaudio[2379]: [pulseaudio] bluez5-util.c: GetManagedObjects() failed: org.freedesktop.DBus.Error.TimedOut: Failed to activate service 'org.bluez': timed out
Jun 10 08:31:41 sbat1 kubelet[2901]: 2019-06-10 08:31:41.652 [INFO][6555] ipam.go 720: Failed to update block block=10.233.70.0/24 error=update conflict: BlockKey(cidr=10.233.70.0/24) handle="cni0.61b303fe4fbc6cdd92780e291655ea9f4a0db62a3cebd8e65b08a082c48424ea" host="sbat1"
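
Nearly every hit from this second grep is the same underlying symptom: the kubelet cannot reach the apiserver at 135.25.24.167:6443 ("connect: connection refused") while the static control-plane pods (kube-apiserver-sbat1, kube-controller-manager-sbat1, kube-scheduler-sbat1) come back up after the reboot. Some ways one might confirm whether the apiserver recovers (a sketch; assumes a kubeadm-style control plane running as static pods under the local Docker daemon):

    # Is anything listening on the apiserver port yet?
    ss -tlnp | grep 6443
    # Is the kube-apiserver container up, exited, or restarting?
    docker ps -a --filter name=kube-apiserver
    # Follow the kubelet while it restarts the static pods.
    journalctl -u kubelet -f

If the apiserver container keeps exiting, docker logs <container-id> on it usually shows why; if it stays up, the connection-refused and "failed to ensure node lease" messages above should stop on their own.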