# Kubernetes snippets for Neovim (Snippy, SnipMate format)

# Kubernetes Resources
# Deployment: selector/template labels mirror $1 so they always match.
snippet deploy "Deployment"
	apiVersion: apps/v1
	kind: Deployment
	metadata:
	  name: ${1:my-app}
	  namespace: ${2:default}
	  labels:
	    app: $1
	spec:
	  replicas: ${3:1}
	  selector:
	    matchLabels:
	      app: $1
	  template:
	    metadata:
	      labels:
	        app: $1
	    spec:
	      containers:
	        - name: $1
	          image: ${4:nginx:latest}
	          ports:
	            - containerPort: ${5:80}
	$0
# Service: selector must match the target pods' labels.
snippet svc "Service"
	apiVersion: v1
	kind: Service
	metadata:
	  name: ${1:my-service}
	  namespace: ${2:default}
	spec:
	  selector:
	    app: ${3:my-app}
	  ports:
	    - port: ${4:80}
	      targetPort: ${5:80}
	      protocol: TCP
	  type: ${6:ClusterIP}
	$0
# Ingress (networking.k8s.io/v1): one host rule with one path backend.
snippet ing "Ingress"
	apiVersion: networking.k8s.io/v1
	kind: Ingress
	metadata:
	  name: ${1:my-ingress}
	  namespace: ${2:default}
	spec:
	  rules:
	    - host: ${3:example.com}
	      http:
	        paths:
	          - path: ${4:/}
	            pathType: Prefix
	            backend:
	              service:
	                name: ${5:my-service}
	                port:
	                  number: ${6:80}
	$0
# ConfigMap: plain key/value data.
snippet cm "ConfigMap"
	apiVersion: v1
	kind: ConfigMap
	metadata:
	  name: ${1:my-config}
	  namespace: ${2:default}
	data:
	  ${3:key}: ${4:value}
	$0
# Secret: stringData takes plain text; the API server base64-encodes it.
snippet secret "Secret"
	apiVersion: v1
	kind: Secret
	metadata:
	  name: ${1:my-secret}
	  namespace: ${2:default}
	type: ${3:Opaque}
	stringData:
	  ${4:key}: ${5:value}
	$0
# Namespace with an environment label.
snippet ns "Namespace"
	apiVersion: v1
	kind: Namespace
	metadata:
	  name: ${1:my-namespace}
	  labels:
	    environment: ${2:dev}
	$0
# PersistentVolumeClaim: single access mode plus a storage request.
snippet pvc "PersistentVolumeClaim"
	apiVersion: v1
	kind: PersistentVolumeClaim
	metadata:
	  name: ${1:my-pvc}
	  namespace: ${2:default}
	spec:
	  accessModes:
	    - ${3:ReadWriteOnce}
	  resources:
	    requests:
	      storage: ${4:1Gi}
	$0
# Job: restartPolicy must be Never or OnFailure for Jobs.
snippet job "Job"
	apiVersion: batch/v1
	kind: Job
	metadata:
	  name: ${1:my-job}
	  namespace: ${2:default}
	spec:
	  template:
	    spec:
	      containers:
	        - name: $1
	          image: ${3:busybox}
	          command: [${4:"echo", "hello"}]
	      restartPolicy: Never
	  backoffLimit: ${5:3}
	$0
# CronJob: schedule is quoted — an unquoted leading * would start a YAML alias.
snippet cron "CronJob"
	apiVersion: batch/v1
	kind: CronJob
	metadata:
	  name: ${1:my-cronjob}
	  namespace: ${2:default}
	spec:
	  schedule: "${3:*/5 * * * *}"
	  jobTemplate:
	    spec:
	      template:
	        spec:
	          containers:
	            - name: $1
	              image: ${4:busybox}
	              command: [${5:"echo", "hello"}]
	          restartPolicy: Never
	$0
# StatefulSet: requires a (usually headless) governing serviceName.
snippet sts "StatefulSet"
	apiVersion: apps/v1
	kind: StatefulSet
	metadata:
	  name: ${1:my-statefulset}
	  namespace: ${2:default}
	spec:
	  serviceName: ${3:my-service}
	  replicas: ${4:1}
	  selector:
	    matchLabels:
	      app: $1
	  template:
	    metadata:
	      labels:
	        app: $1
	    spec:
	      containers:
	        - name: $1
	          image: ${5:nginx:latest}
	          ports:
	            - containerPort: ${6:80}
	$0
# DaemonSet. Fixed tabstop collision: the original reused $2 for both
# namespace and image (and $3 for containerPort) — image/port renumbered.
snippet ds "DaemonSet"
	apiVersion: apps/v1
	kind: DaemonSet
	metadata:
	  name: ${1:my-daemonset}
	  namespace: ${2:default}
	spec:
	  selector:
	    matchLabels:
	      app: $1
	  template:
	    metadata:
	      labels:
	        app: $1
	    spec:
	      containers:
	        - name: $1
	          image: ${3:nginx:latest}
	          ports:
	            - containerPort: ${4:80}
	$0
# ServiceAccount: minimal identity object.
snippet sa "ServiceAccount"
	apiVersion: v1
	kind: ServiceAccount
	metadata:
	  name: ${1:my-sa}
	  namespace: ${2:default}
	$0
# Role: "" (empty apiGroup default) targets the core API group.
# Original used the malformed empty-default form ${3:}; plain ${3} is the
# correct snipmate tabstop for an empty insertion point.
snippet role "Role"
	apiVersion: rbac.authorization.k8s.io/v1
	kind: Role
	metadata:
	  name: ${1:my-role}
	  namespace: ${2:default}
	rules:
	  - apiGroups: ["${3}"]
	    resources: ["${4:pods}"]
	    verbs: ["${5:get}", "list", "watch"]
	$0
# RoleBinding: subject namespace mirrors $2 so it tracks metadata.namespace.
snippet rb "RoleBinding"
	apiVersion: rbac.authorization.k8s.io/v1
	kind: RoleBinding
	metadata:
	  name: ${1:my-rolebinding}
	  namespace: ${2:default}
	subjects:
	  - kind: ServiceAccount
	    name: ${3:my-sa}
	    namespace: $2
	roleRef:
	  kind: Role
	  name: ${4:my-role}
	  apiGroup: rbac.authorization.k8s.io
	$0
# HorizontalPodAutoscaler (autoscaling/v2) targeting a Deployment on CPU.
snippet hpa "HorizontalPodAutoscaler"
	apiVersion: autoscaling/v2
	kind: HorizontalPodAutoscaler
	metadata:
	  name: ${1:my-hpa}
	  namespace: ${2:default}
	spec:
	  scaleTargetRef:
	    apiVersion: apps/v1
	    kind: Deployment
	    name: ${3:my-app}
	  minReplicas: ${4:1}
	  maxReplicas: ${5:10}
	  metrics:
	    - type: Resource
	      resource:
	        name: cpu
	        target:
	          type: Utilization
	          averageUtilization: ${6:80}
	$0
# Container-level probe pair; readiness port mirrors the liveness port ($2).
# See the note at the end of this file about avoiding initialDelaySeconds.
snippet probe "Liveness + Readiness probes"
	livenessProbe:
	  httpGet:
	    path: ${1:/healthz}
	    port: ${2:80}
	  periodSeconds: ${3:10}
	readinessProbe:
	  httpGet:
	    path: ${4:/ready}
	    port: $2
	  periodSeconds: ${5:5}
	$0
# Container-level resource requests/limits block.
snippet limits "Resource limits"
	resources:
	  requests:
	    cpu: ${1:100m}
	    memory: ${2:128Mi}
	  limits:
	    cpu: ${3:500m}
	    memory: ${4:256Mi}
	$0
# YAML document separator for multi-resource manifests.
snippet --- "Document separator"
	---
	$0
# Single entry for a containers: list (expand under the list key).
snippet container "Container entry"
	- name: ${1:my-container}
	  image: ${2:nginx:latest}
	  ports:
	    - containerPort: ${3:80}
	  env:
	    - name: ${4:MY_VAR}
	      value: ${5:my-value}
	$0
# envFrom: import all keys of a ConfigMap and a Secret as env vars.
snippet envfrom "Env from ConfigMap/Secret"
	envFrom:
	  - configMapRef:
	      name: ${1:my-config}
	  - secretRef:
	      name: ${2:my-secret}
	$0
# Matched volumeMounts/volumes pair; the volume name mirrors $1.
# Note: volumeMounts is container-level, volumes is pod-spec-level —
# re-indent the two halves to their proper parents after expansion.
snippet vol "Volume + VolumeMount pair"
	volumeMounts:
	  - name: ${1:my-volume}
	    mountPath: ${2:/data}
	volumes:
	  - name: $1
	    persistentVolumeClaim:
	      claimName: ${3:my-pvc}
	$0

# Caution — avoid initialDelaySeconds:
#
# initialDelaySeconds delays the first probe by a fixed duration, masking slow
# starts rather than detecting them. If a container takes longer than expected
# to start, the delay hides the problem — and if it starts quickly, the delay
# wastes time. The readiness probe already gates traffic: a failing readiness
# probe keeps the pod out of Service endpoints without restarting it. For
# containers with genuinely slow startup (JVM apps, large ML models), use a
# startupProbe instead — it runs only during startup and prevents liveness
# kills until it passes, without hiding failures behind an arbitrary timer.