Skip to content

Flux Helm Usage Guide

Using kube-prometheus-stack as the example. This guide follows the Flux HelmRelease workflow.

  1. Install with Helm

    Terminal window
    helm repo add prometheus-community https://prometheus-community.github.io/helm-charts
    helm repo update
    helm install kube-prometheus prometheus-community/kube-prometheus-stack \
    -n monitoring --create-namespace --version 82.15.0
    NAME: kube-prometheus
    LAST DEPLOYED: Fri Mar 27 12:54:18 2026
    NAMESPACE: monitoring
    STATUS: deployed
    REVISION: 1
    DESCRIPTION: Install complete
    TEST SUITE: None
    NOTES:
    kube-prometheus-stack has been installed. Check its status by running:
    kubectl --namespace monitoring get pods -l "release=kube-prometheus"
    Get Grafana 'admin' user password by running:
    kubectl --namespace monitoring get secrets kube-prometheus-grafana -o jsonpath="{.data.admin-password}" | base64 -d ; echo
    Access Grafana local instance:
    export POD_NAME=$(kubectl --namespace monitoring get pod -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=kube-prometheus" -oname)
    kubectl --namespace monitoring port-forward $POD_NAME 3000
    Get your grafana admin user password by running:
    kubectl get secret --namespace monitoring -l app.kubernetes.io/component=admin-secret -o jsonpath="{.items[0].data.admin-password}" | base64 --decode ; echo
    Visit https://github.com/prometheus-operator/kube-prometheus for instructions on how to create & configure Alertmanager and Prometheus instances using the Operator.
    ________________________________________________________
    Executed in 14.31 secs fish external
    usr time 2.31 secs 802.00 micros 2.31 secs
    sys time 0.31 secs 0.00 micros 0.31 secs
    Terminal window
    PASSWORD=$(kubectl --namespace monitoring get secret kube-prometheus-grafana -o jsonpath="{.data.admin-password}" | base64 -d)
    echo "$PASSWORD"
    POD_NAME=$(kubectl --namespace monitoring get pod -l "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=kube-prometheus" -o name)
    kubectl --namespace monitoring port-forward "$POD_NAME" 3000 &
    FWD_PID=$!
    sleep 2
    curl -s -u "admin:${PASSWORD}" http://localhost:3000/api/datasources | python3 -m json.tool
    kill "$FWD_PID"
    #!/usr/bin/bun
    // List the Grafana datasources for the kube-prometheus release.
    // Flow: start `kubectl proxy` -> read the admin password from the release
    // Secret -> find the Grafana pod -> port-forward to it -> call the Grafana
    // HTTP API with basic auth.
    const NS = "monitoring";
    const RELEASE = "kube-prometheus";
    const PROXY_PORT = 8001;

    async function main() {
      const proxy = kubectlProxy(PROXY_PORT);
      // Give `kubectl proxy` a moment to bind its port before using it.
      await Bun.sleep(1000);
      try {
        const password = await grafanaPassword();
        const podName = await grafanaPod();
        const fwd = portForward(podName, 3000);
        // The port-forward tunnel needs time to establish.
        await Bun.sleep(2000);
        try {
          const res = await fetch("http://localhost:3000/api/datasources", {
            headers: { Authorization: `Basic ${btoa(`admin:${password}`)}` },
          });
          // fetch() resolves on 4xx/5xx; fail loudly on auth/HTTP errors
          // instead of printing an opaque error body.
          if (!res.ok) throw new Error(`Grafana API returned ${res.status}`);
          console.log(await res.json());
        } finally {
          fwd.kill();
        }
      } finally {
        proxy.kill();
      }
    }

    /** Spawn `kubectl proxy` on the given port; the caller must kill() it. */
    function kubectlProxy(port: number) {
      return Bun.spawn(["kubectl", "proxy", "--port", String(port)], {
        stdout: "ignore",
        stderr: "ignore",
      });
    }

    /** GET a Kubernetes API path through the local proxy and parse the JSON. */
    async function k8s(path: string) {
      const res = await fetch(`http://localhost:${PROXY_PORT}${path}`);
      // A bad path or RBAC failure returns a non-2xx Status object; surface it
      // here rather than letting callers misread the payload.
      if (!res.ok) throw new Error(`k8s API ${path} returned ${res.status}`);
      return res.json();
    }

    /** Read and decode the Grafana admin password from the release Secret. */
    async function grafanaPassword() {
      const secret = await k8s(`/api/v1/namespaces/${NS}/secrets/${RELEASE}-grafana`);
      // Secret data is base64-encoded by the Kubernetes API.
      return atob(secret.data["admin-password"]);
    }

    /** Return the name of the first pod matching the Grafana release labels. */
    async function grafanaPod() {
      const label = "app.kubernetes.io/name=grafana,app.kubernetes.io/instance=" + RELEASE;
      const pods = await k8s(
        `/api/v1/namespaces/${NS}/pods?labelSelector=${encodeURIComponent(label)}`,
      );
      const name = pods.items[0]?.metadata?.name;
      if (!name) throw new Error("No grafana pod found");
      return name;
    }

    /** Spawn a `kubectl port-forward` to the pod; the caller must kill() it. */
    function portForward(pod: string, localPort: number) {
      return Bun.spawn([
        "kubectl",
        "--namespace",
        NS,
        "port-forward",
        `pod/${pod}`,
        String(localPort),
      ]);
    }

    await main();
  2. Experiment

    Terminal window
    kubectl get pods -n monitoring
    kubectl get svc -n monitoring
    kubectl port-forward -n monitoring svc/kube-prometheus-grafana 3000:80
    # tweak and re-apply:
    helm upgrade kube-prometheus prometheus-community/kube-prometheus-stack \
    -n monitoring -f values.yaml

    Set a memory limit on Prometheus. It is a time-series database for metrics, so its memory use grows with the amount of data it stores.

    helm upgrade kube-prometheus prometheus-community/kube-prometheus-stack \
    -n monitoring \
    --set prometheus.prometheusSpec.resources.requests.memory=512Mi \
    --set prometheus.prometheusSpec.resources.limits.memory=1Gi
    kubectl get pod \
    -n monitoring \
    -l app.kubernetes.io/name=prometheus \
    -o jsonpath='{.items[0].spec.containers[0].resources}' |\
    python3 -m json.tool
    #!/usr/bin/bun
    // Apply Prometheus memory requests/limits via `helm upgrade`, wait for the
    // StatefulSet rollout, then print the resulting container resources as
    // read from the Kubernetes API (through `kubectl proxy`).
    const NS = "monitoring";
    const RELEASE = "kube-prometheus";
    const PROXY_PORT = 8001;

    async function main() {
      const proxy = kubectlProxy(PROXY_PORT);
      // Give `kubectl proxy` a moment to bind before issuing API requests.
      await Bun.sleep(1000);
      try {
        helmUpgrade();
        waitForRollout();
        const resources = await prometheusPodResources();
        console.log(JSON.stringify(resources, null, 2));
      } finally {
        proxy.kill();
      }
    }

    /** Spawn `kubectl proxy` on the given port; the caller must kill() it. */
    function kubectlProxy(port: number) {
      return Bun.spawn(["kubectl", "proxy", "--port", String(port)], {
        stdout: "ignore",
        stderr: "ignore",
      });
    }

    /** GET a Kubernetes API path through the local proxy and parse the JSON. */
    async function k8s(path: string, init?: RequestInit) {
      const res = await fetch(`http://localhost:${PROXY_PORT}${path}`, init);
      // fetch() resolves on 4xx/5xx; check the status so a bad path or RBAC
      // error fails here instead of confusing downstream JSON handling.
      if (!res.ok) throw new Error(`k8s API ${path} returned ${res.status}`);
      return res.json();
    }

    /** Run the helm upgrade that sets Prometheus memory requests/limits. */
    function helmUpgrade() {
      const result = Bun.spawnSync(
        [
          "helm",
          "upgrade",
          RELEASE,
          "prometheus-community/kube-prometheus-stack",
          "-n",
          NS,
          "--set",
          "prometheus.prometheusSpec.resources.requests.memory=512Mi",
          "--set",
          "prometheus.prometheusSpec.resources.limits.memory=1Gi",
        ],
        { stdout: "inherit", stderr: "inherit" },
      );
      if (result.exitCode !== 0) throw new Error("helm upgrade failed");
    }

    /** Block until the Prometheus StatefulSet finishes rolling out (120s cap). */
    function waitForRollout() {
      // NOTE(review): the StatefulSet name embeds the chart's truncated
      // fullname ("kube-prome"); confirm it still matches if the release name
      // ever changes.
      const result = Bun.spawnSync(
        [
          "kubectl",
          "rollout",
          "status",
          "statefulset",
          `prometheus-${RELEASE}-kube-prome-prometheus`,
          "-n",
          NS,
          "--timeout=120s",
        ],
        { stdout: "inherit", stderr: "inherit" },
      );
      if (result.exitCode !== 0) throw new Error("rollout timed out");
    }

    /** Return the `resources` block of the "prometheus" container in the first matching pod. */
    async function prometheusPodResources() {
      const label = "app.kubernetes.io/name=prometheus";
      const pods = await k8s(
        `/api/v1/namespaces/${NS}/pods?labelSelector=${encodeURIComponent(label)}`,
      );
      const containers = pods.items[0]?.spec?.containers;
      if (!containers) throw new Error("No prometheus pod found");
      const prometheus = containers.find((c: any) => c.name === "prometheus");
      // Previously a missing container silently yielded `undefined`; fail
      // explicitly so the printed output is never empty by accident.
      if (!prometheus) throw new Error("No prometheus container in pod");
      return prometheus.resources;
    }

    await main();
    Terminal window
    # The actual volumes themselves
    # i.e. What volumes we have available in the cluster
    # e.g. The pizza shop has these pizzas ready to go
    kubectl get pv
    Terminal window
    # Get persistent volume claims for the `monitoring` namespace
    # i.e. what volumes a pod is requesting to use (filtered by namespace)
    # e.g. I ordered a Supreme with extra olives, can I have it please
    kubectl get pvc -n monitoring
    ## No output
  3. Export what worked

    Terminal window
    # The `-a` gives everything, which is not very useful for us here
    # helm get values kube-prometheus -n monitoring -a -o yaml > values.exported.yaml
    helm get values kube-prometheus -n monitoring -o yaml > /tmp/values.exported.yaml
  4. Uninstall the trial

    Terminal window
    helm uninstall kube-prometheus -n monitoring
  5. Generate Flux manifests

    Terminal window
    mkdir -p infrastructure/base/monitoring/
    flux create source helm prometheus-community \
    --url=https://prometheus-community.github.io/helm-charts \
    --interval=1h \
    --export > infrastructure/base/monitoring/helmrepository.yaml
    flux create helmrelease kube-prometheus \
    --source=HelmRepository/prometheus-community \
    --chart=kube-prometheus-stack --chart-version=82.15.0 \
    --release-name=kube-prometheus --target-namespace=monitoring \
    --interval=5m --values=values.exported.yaml \
    --export > infrastructure/base/monitoring/helmrelease.yaml
  6. Commit and let Flux take over

    Terminal window
    git add infrastructure/base/monitoring/
    git commit -m "Add kube-prometheus-stack via Flux"
    git push
    flux reconcile source git flux-system
    flux get helmreleases

Flux’s create commands support --export to print YAML, and HelmRelease supports releaseName, targetNamespace, and storageNamespace to match Helm release identity. (Flux)

Add the repo and install the chart:

Terminal window
helm repo add <repo-alias> https://charts.example.com
helm repo update
helm install <release-name> <repo-alias>/<chart-name> \
-n <target-namespace> \
--create-namespace \
--version <chart-version> \
-f values.yaml

If you have no values file yet, omit -f values.yaml. Helm can show the computed values and rendered manifests for the release. (Helm)

it might be helpful to look at the YAML corresponding to the Helm chart to determine what key value pairs are available:

Terminal window
# List the pods
kubectl get pods -n monitoring -l app.kubernetes.io/instance=kube-prometheus
# View the yaml
helm get manifest kube-prometheus -n monitoring

Once the test install works, capture the settings before you remove it:

Terminal window
helm list -A
helm status <release-name> -n <target-namespace>
helm get values <release-name> -n <target-namespace> -a -o yaml > values.exported.yaml
helm get manifest <release-name> -n <target-namespace> > rendered.exported.yaml

Flux needs values.exported.yaml. rendered.exported.yaml is for reference and diffing. Flux manages the chart declaratively as a HelmRelease, not by tracking raw rendered manifests. (Flux)

Terminal window
helm uninstall <release-name> -n <target-namespace>

Remove the trial deployment so Flux can recreate it from Git. (Helm)

Create the HelmRepository manifest:

Terminal window
flux create source helm <repo-alias> \
--url=https://charts.example.com \
--interval=1h \
--export > helmrepository.yaml

flux create source helm generates a HelmRepository; --export prints YAML instead of applying it. (Flux)

Generate the HelmRelease:

Terminal window
flux create helmrelease <release-name> \
--source=HelmRepository/<repo-alias> \
--chart=<chart-name> \
--chart-version=<chart-version> \
--release-name=<release-name> \
--target-namespace=<target-namespace> \
--interval=15m \
--values=values.exported.yaml \
--export > helmrelease.yaml

helm-controller reconciles the resulting HelmRelease. (Flux)

Commit:

  • helmrepository.yaml
  • helmrelease.yaml

Values can be embedded in the generated manifest or kept in a separate file, depending on how you structure the repo.

Flux bootstrap only creates the flux-system Kustomization. It watches clusters/local/ but ignores everything outside that path unless you add pointers. Create a Flux Kustomization that tells it where the monitoring manifests live:

clusters/local/infrastructure.yaml
apiVersion: kustomize.toolkit.fluxcd.io/v1
kind: Kustomization
metadata:
name: infrastructure
namespace: flux-system
spec:
interval: 10m
sourceRef:
kind: GitRepository
name: flux-system
path: ./infrastructure/base/monitoring
prune: true

If the sync path changed (e.g. you renamed clusters/dev to clusters/local), apply the updated sync config first:

Terminal window
kubectl apply -f clusters/local/flux-system/gotk-sync.yaml

Then uninstall the imperative Helm release so Flux can recreate it:

Terminal window
helm uninstall kube-prometheus -n monitoring

Commit and push:

Terminal window
git add clusters/local/infrastructure.yaml
git commit -m "Add infrastructure Kustomization for Flux"
git push

Force an immediate reconcile or wait for the interval:

Terminal window
flux reconcile source git flux-system
flux reconcile kustomization flux-system

The chain: flux-system Kustomization reads clusters/local/, finds infrastructure.yaml, follows the path to infrastructure/base/monitoring/, and applies the HelmRepository + HelmRelease.

Verify:

Terminal window
flux get kustomizations
flux get helmreleases -A
kubectl get pods -n monitoring
NAME REVISION SUSPENDED READY MESSAGE
flux-system main@sha1:99348924 False True Applied revision: main@sha1:99348924
infrastructure main@sha1:99348924 False True Applied revision: main@sha1:99348924
NAMESPACE NAME REVISION SUSPENDED READY MESSAGE
flux-system kube-prometheus 82.15.0 False True Helm install succeeded for release monitoring/kube-prometheus.v1 with chart kube-prometheus-stack@82.15.0
NAME READY STATUS RESTARTS AGE
alertmanager-kube-prometheus-kube-prome-alertmanager-0 1/2 Error 5 (90s ago) 3m3s
kube-prometheus-grafana-d6bcf544f-q4qkj 3/3 Running 0 3m4s
kube-prometheus-kube-prome-operator-6857665c6-8cp6t 1/1 Running 0 3m4s
kube-prometheus-kube-state-metrics-d949d4c54-6hbnk 1/1 Running 0 3m4s
kube-prometheus-prometheus-node-exporter-x7cxw 1/1 Running 0 3m4s
prometheus-kube-prometheus-kube-prome-prometheus-0 2/2 Running 0 3m3s
(default) ryan@vale ~/S/j/2/0/2/l/c/local (main)> flux get helmreleases
NAME REVISION SUSPENDED READY MESSAGE
kube-prometheus 82.15.0 False True Helm install succeeded for release monitoring/kube-prometheus.v1 with chart kube-prometheus-stack@82.15.0

This install left the Alertmanager pod in an error state. We can investigate it with:

Terminal window
kubectl logs -n monitoring alertmanager-kube-prometheus-kube-prome-alertmanager-0 --all-containers
...
ts=2026-03-27T05:46:05.30694616Z level=error caller=/workspace/cmd/prometheus-config-reloader/main.go:225 msg="Failed to run"
err="too many open files\ncreate watcher\ngithub.com/thanos-io/thanos/pkg/reloader.(*watcher).addPath\n\t/go/pkg/mod/github.co
...

So we can fix this temporarily with:

echo 1024 | sudo tee /proc/sys/fs/inotify/max_user_instances;
sudo sysctl fs.inotify.max_user_watches=10000000

To make this permanent, add the values to a file under sysctl.d. Check man sysctl.d(5) and man sysctl(8) for the correct drop-in path on your distribution. On Fedora Atomic, /etc/sysctl.d/ is a persistent overlay and already writable:

Terminal window
# Check what already exists
ls /etc/sysctl.d/
cat /etc/sysctl.d/40-max-user-watches.conf
# Append the missing setting (watches may already be set)
echo "fs.inotify.max_user_instances=512" | sudo tee -a /etc/sysctl.d/40-max-user-watches.conf

Apply without rebooting:

Terminal window
sudo sysctl --system

The alertmanager pod is in a crash loop, so it will pick up the new limit on its next restart. To force it immediately:

Terminal window
kubectl delete pod -n monitoring alertmanager-kube-prometheus-kube-prome-alertmanager-0

Wait for it to come back and confirm the error is gone:

Terminal window
kubectl get pods -n monitoring -l app.kubernetes.io/name=alertmanager
kubectl logs -n monitoring alertmanager-kube-prometheus-kube-prome-alertmanager-0 --all-containers

In normal operation, data stored through a PersistentVolumeClaim should survive:

  • pod restarts
  • Deployment rollouts
  • StatefulSet pod recreation
  • node reboots
  • Helm upgrades
  • Flux reconciliations

That statement is only true as long as the PersistentVolumeClaim still exists and the backing storage has not been deleted.

The important boundaries are:

  • Restarting a pod does not delete the claim or the volume.
  • Re-applying manifests with Helm or Flux does not delete the claim unless the manifest or chart change removes it.
  • Deleting a namespace usually deletes namespaced PersistentVolumeClaim objects.
  • Deleting the cluster may or may not delete the underlying disk, depending on the platform and storage backend.

The reclaim policy is a property of the PersistentVolume, not the PersistentVolumeClaim.

For dynamically provisioned volumes, the default is commonly Delete. In that mode, deleting the claim usually leads to the backing volume being deleted as well. With Retain, deleting the claim releases the volume but leaves the underlying storage asset behind for manual recovery.

Terminal window
kubectl get storageclass -o custom-columns=NAME:.metadata.name,RECLAIM:.reclaimPolicy
NAME RECLAIM
local-path Delete

If you want Flux to keep managing the application, but you do not want a storage cleanup mistake to destroy the underlying disk, set the bound persistentVolume reclaim policy to Retain.

This is the important distinction:

  • Flux manages Git state.
  • Helm manages the release resources.
  • Kubernetes manages the PersistentVolumeClaim and the bound PersistentVolume.
  • The reclaim policy lives on the persistentVolume, not on the claim and not in Flux.

With reclaim policy Delete, removing the claim usually removes the backing volume too. With reclaim policy Retain, deleting the claim releases the volume but leaves the underlying storage asset behind for manual recovery.

That means Retain does not stop Flux from deleting the claim. It protects the data after the claim has been removed.

Typical use cases:

  • Prometheus data you do not want to lose during Git refactors.
  • Grafana storage you want to recover after an accidental uninstall.
  • Any stateful workload where “delete the app” should not also mean “delete the disk”.

First, let Kubernetes create and bind the claim as usual. Then find the bound volume and patch its reclaim policy:

Terminal window
kubectl get pvc -n monitoring
kubectl get pv
kubectl patch pv <pv-name> -p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'

Verify the result:

Terminal window
kubectl get pv <pv-name> -o custom-columns=NAME:.metadata.name,RECLAIM:.spec.persistentVolumeReclaimPolicy,CLAIM:.spec.claimRef.name

Example:

Terminal window
kubectl patch pv pvc-1234abcd-5678-efgh-9012-ijklmnopqrst \
-p '{"spec":{"persistentVolumeReclaimPolicy":"Retain"}}'

After that:

  • If Flux or Helm deletes the persistentVolumeClaim, the claim is gone.
  • The persistentVolume moves to the Released state instead of being destroyed.
  • The data remains on disk.
  • Re-attaching it is now a manual recovery task.

If a claim was deleted but the volume was retained, the normal recovery flow is:

  1. Inspect the retained persistentVolume.
  2. Clear or update its claimRef if needed.
  3. Create a new persistentVolumeClaim that matches that volume’s class, size, and access mode.
  4. Re-bind the workload to the recovered claim.

This is less convenient than automatic deletion, but it is much safer for data you care about.

Use Retain for selected volumes when:

  • the data has value beyond the lifetime of the release
  • you are still iterating on Flux manifests and want protection from accidental Git deletions
  • you prefer manual cleanup over silent data loss

Leave the default Delete behavior in place when:

  • the data is disposable
  • the environment is short-lived
  • automatic cleanup is more important than recovery

Retain protects the backing volume, but it does not make Flux “storage-aware”. Flux will still reconcile deleted files. If a Helm release or PVC-producing manifest is removed from Git, Flux can still remove the claim. Retain simply turns that situation from “claim deleted and data destroyed” into “claim deleted but data recoverable”.

Creating persistentVolume by creating a persistentVolumeClaim

Section titled “Creating persistentVolume by creating a persistentVolumeClaim”

With dynamic provisioning, creating a PersistentVolumeClaim automatically provisions the backing PersistentVolume for us.

  1. infrastructure/base/monitoring/kustomization.yaml
apiVersion: kustomize.config.k8s.io/v1beta1
kind: Kustomization
resources:
- grafana-secret.yaml
- helmrepository.yaml
- helmrelease.yaml
  2. infrastructure/base/monitoring/helmrelease.yaml
  • diff

    diff --git a/infrastructure/base/monitoring/helmrelease.yaml b/infrastructure/base/monitoring/helmrelease.yaml
    index 2eaff72..6a0f6c1 100644
    --- a/infrastructure/base/monitoring/helmrelease.yaml
    +++ b/infrastructure/base/monitoring/helmrelease.yaml
    @@ -18,9 +18,24 @@ spec:
    storageNamespace: monitoring
    targetNamespace: monitoring
    values:
    + alertmanager:
    + alertmanagerSpec:
    + storage:
    + volumeClaimTemplate:
    + spec:
    + accessModes:
    + - ReadWriteOnce
    + resources:
    + requests:
    + storage: 2Gi
    grafana:
    admin:
    existingSecret: grafana-admin
    + persistence:
    + enabled: true
    + accessModes:
    + - ReadWriteOnce
    + size: 10Gi
    prometheus:
    prometheusSpec:
    resources:
    @@ -28,3 +43,11 @@ spec:
    memory: 1Gi
    requests:
    memory: 512Mi
    + storageSpec:
    + volumeClaimTemplate:
    + spec:
    + accessModes:
    + - ReadWriteOnce
    + resources:
    + requests:
    + storage: 20Gi
  • full

    apiVersion: helm.toolkit.fluxcd.io/v2
    kind: HelmRelease
    metadata:
    name: kube-prometheus
    namespace: flux-system
    spec:
    chart:
    spec:
    chart: kube-prometheus-stack
    reconcileStrategy: ChartVersion
    sourceRef:
    kind: HelmRepository
    name: prometheus-community
    version: 82.15.0
    interval: 5m0s
    releaseName: kube-prometheus
    storageNamespace: monitoring
    targetNamespace: monitoring
    values:
    alertmanager:
    alertmanagerSpec:
    storage:
    volumeClaimTemplate:
    spec:
    accessModes:
    - ReadWriteOnce
    resources:
    requests:
    storage: 2Gi
    grafana:
    admin:
    existingSecret: grafana-admin
    persistence:
    enabled: true
    accessModes:
    # ReadWriteOnce means the PVC can be mounted read-write by one node at a time.
    # This is the common mode for a single Grafana replica backed by one disk.
    - ReadWriteOnce
    # Requested PVC capacity for Grafana data.
    # Kubernetes does not support an "unlimited" PVC size; a concrete storage request is required.
    # If omitted, Helm falls back to the chart's default value instead of allowing unbounded growth.
    size: 10Gi
    prometheus:
    prometheusSpec:
    resources:
    limits:
    memory: 1Gi
    requests:
    memory: 512Mi
    storageSpec:
    volumeClaimTemplate:
    spec:
    accessModes:
    - ReadWriteOnce
    resources:
    requests:
    storage: 20Gi

Declaring a PersistentVolume first, then binding a PersistentVolumeClaim

Section titled “Declaring a PersistentVolume first, then binding a PersistentVolumeClaim”

The previous example relies on dynamic provisioning: the PVC is created first, and the storage class provisions the backing volume for you.

Sometimes you want the opposite model:

  • create the PersistentVolume yourself
  • set its reclaim policy to Retain
  • bind a specific PersistentVolumeClaim to that exact volume

This is useful when you want more explicit control over the disk lifecycle.

For a learning cluster, a minimal example looks like this:

apiVersion: v1
kind: PersistentVolume
metadata:
name: grafana-static-pv
spec:
capacity:
storage: 10Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
storageClassName: ""
hostPath:
path: /var/lib/k8s-static/grafana
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: grafana-static-pvc
namespace: monitoring
spec:
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 10Gi
storageClassName: ""
volumeName: grafana-static-pv

Important details:

  • persistentVolumeReclaimPolicy: Retain is set on the PersistentVolume
  • volumeName: grafana-static-pv tells the claim to bind to that exact volume
  • storageClassName: "" avoids accidentally using dynamic provisioning
  • the PVC request must be compatible with the PV’s size and access mode

If the claim is later deleted:

  • the PVC is removed
  • the PV moves to Released
  • the data under the backing path is not automatically deleted

For this specific example, hostPath is acceptable for a local or single-node lab, but it is not the pattern you would normally use for a production cluster.

To verify the binding:

Terminal window
kubectl get pv grafana-static-pv
kubectl get pvc -n monitoring grafana-static-pvc

To use that claim in a workload, reference grafana-static-pvc from the pod or chart values instead of creating a new volume claim template.

Once you commit and then push under the path Flux reconciles, Flux installs it. Check status:

Terminal window
flux get helmreleases

Trigger a reconcile manually if needed:

Terminal window
flux reconcile helmrelease <release-name>

Both commands are part of Flux’s current CLI. (Flux)

For a chart you have not committed to yet:

graph LR
    A[helm install] --> B[test]
    B --> C[helm get values]
    C --> D[helm uninstall]
    D --> E["flux create ... --export"]
    E --> F[commit & push]

This beats trying to have Flux “adopt” a release you installed imperatively, even though Flux can reconcile releases when releaseName and namespaces match. (Flux)

Terminal window
# Trial
helm repo add demo https://charts.example.com
helm repo update
helm install myapp demo/thechart \
-n myapp \
--create-namespace \
--version 1.2.3 \
-f values.yaml
# Inspect and save
helm status myapp -n myapp
helm get values myapp -n myapp -a -o yaml > values.exported.yaml
helm get manifest myapp -n myapp > rendered.exported.yaml
# Remove trial
helm uninstall myapp -n myapp
# Generate Flux YAML
flux create source helm demo \
--url=https://charts.example.com \
--interval=1h \
--export > helmrepository.yaml
flux create helmrelease myapp \
--source=HelmRepository/demo \
--chart=thechart \
--chart-version=1.2.3 \
--release-name=myapp \
--target-namespace=myapp \
--interval=15m \
--values=values.exported.yaml \
--export > helmrelease.yaml

One caution: if the chart installed CRDs or left behind cluster-scoped resources, uninstall may not return the cluster to a blank state. This is chart-specific, not Flux-specific. HelmRelease supports install, upgrade, uninstall, rollback, and drift correction once you move under Flux. (Flux)