I have seen scenarios where I, or someone else who deployed a `rook-ceph` cluster, wanted to clean up or delete the "rook-ceph" namespace, which hosts most of the Ceph cluster and Rook operator pods. However, the deletion got stuck — in other words, some of the pods remained in the "Terminating" state forever, which is not good. There are many threads and discussions in various forums on how to tackle this scenario and delete all the stuck pods. I would like to summarize the commands that worked for me and helped me successfully delete the pods, and thus the namespace, when I encountered this issue.
If you are in this scenario, please try this out!
[terminal]# kubectl -n rook-ceph patch cephclusters.ceph.rook.io rook-ceph -p '{"metadata":{"finalizers": []}}' --type=merge [/terminal]
More details on how we landed in this situation, among other things, can be found here:
https://github.com/rook/rook/issues/2668
If you are still unlucky, inspect the details of the namespace as shown below, identify which resource is blocking deletion (see the `NamespaceFinalizersRemaining` condition), and then remove the finalizer from that problematic resource.
[root@ ceph]# kubectl get ns/rook-ceph -oyaml
apiVersion: v1
kind: Namespace
metadata:
creationTimestamp: "2021-03-19T10:06:57Z"
deletionTimestamp: "2021-03-22T11:38:35Z"
managedFields:
- apiVersion: v1
fieldsType: FieldsV1
fieldsV1:
f:status:
f:phase: {}
manager: kubectl-create
operation: Update
time: "2021-03-19T10:06:57Z"
- apiVersion: v1
fieldsType: FieldsV1
fieldsV1:
f:status:
f:conditions:
.: {}
k:{"type":"NamespaceContentRemaining"}:
.: {}
f:lastTransitionTime: {}
f:message: {}
f:reason: {}
f:status: {}
f:type: {}
k:{"type":"NamespaceDeletionContentFailure"}:
.: {}
f:lastTransitionTime: {}
f:message: {}
f:reason: {}
f:status: {}
f:type: {}
k:{"type":"NamespaceDeletionDiscoveryFailure"}:
.: {}
f:lastTransitionTime: {}
f:message: {}
f:reason: {}
f:status: {}
f:type: {}
k:{"type":"NamespaceDeletionGroupVersionParsingFailure"}:
.: {}
f:lastTransitionTime: {}
f:message: {}
f:reason: {}
f:status: {}
f:type: {}
k:{"type":"NamespaceFinalizersRemaining"}:
.: {}
f:lastTransitionTime: {}
f:message: {}
f:reason: {}
f:status: {}
f:type: {}
manager: kube-controller-manager
operation: Update
time: "2021-03-22T11:38:40Z"
name: rook-ceph
resourceVersion: "1121184"
uid: b962942b-8f2d-4fac-96df-22ff18a77143
spec:
finalizers:
- kubernetes
status:
conditions:
- lastTransitionTime: "2021-03-22T11:38:40Z"
message: All resources successfully discovered
reason: ResourcesDiscovered
status: "False"
type: NamespaceDeletionDiscoveryFailure
- lastTransitionTime: "2021-03-22T11:38:40Z"
message: All legacy kube types successfully parsed
reason: ParsedGroupVersions
status: "False"
type: NamespaceDeletionGroupVersionParsingFailure
- lastTransitionTime: "2021-03-22T11:39:40Z"
message: All content successfully deleted, may be waiting on finalization
reason: ContentDeleted
status: "False"
type: NamespaceDeletionContentFailure
- lastTransitionTime: "2021-03-22T11:38:40Z"
message: 'Some resources are remaining: cephfilesystems.ceph.rook.io has 1 resource
instances'
reason: SomeResourcesRemain
status: "True"
type: NamespaceContentRemaining
- lastTransitionTime: "2021-03-22T11:38:40Z"
message: 'Some content in the namespace has finalizers remaining: cephfilesystem.ceph.rook.io
in 1 resource instances'
reason: SomeFinalizersRemain
status: "True"
type: NamespaceFinalizersRemaining
phase: Terminating
[root@ ceph]# kubectl edit cephfilesystems.ceph.rook.io -n rook-ceph
cephfilesystem.ceph.rook.io/myfs edited
Copyright secured by Digiprove © 2019-2021 Humble Chirammal