diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml
index 945b0d7d5e6..7de4aecac06 100644
--- a/.github/workflows/ci.yaml
+++ b/.github/workflows/ci.yaml
@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2024-12-26T15:20:08Z by kres fcff05e.
+# Generated on 2025-01-05T14:42:27Z by kres fcff05e.
 
 name: default
 concurrency:
@@ -2848,7 +2848,7 @@ jobs:
     runs-on:
       - self-hosted
       - talos
-    if: contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi') || contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi-longhorn')
+    if: contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi') || contains(fromJSON(needs.default.outputs.labels), 'integration/extensions') || contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi-longhorn')
     needs:
       - default
     steps:
@@ -2946,6 +2946,7 @@ jobs:
           EXTRA_TEST_ARGS: -talos.csi=longhorn
           GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
           IMAGE_REGISTRY: registry.dev.siderolabs.io
+          QEMU_MEMORY_WORKERS: "3072"
           QEMU_WORKERS: "3"
           SHORT_INTEGRATION_TEST: "yes"
           WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@hack/test/patches/longhorn.yaml'
diff --git a/.github/workflows/integration-qemu-csi-longhorn-cron.yaml b/.github/workflows/integration-qemu-csi-longhorn-cron.yaml
index 7c81a0fdc78..717409fd189 100644
--- a/.github/workflows/integration-qemu-csi-longhorn-cron.yaml
+++ b/.github/workflows/integration-qemu-csi-longhorn-cron.yaml
@@ -1,6 +1,6 @@
 # THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
 #
-# Generated on 2024-11-28T13:53:18Z by kres 232fe63.
+# Generated on 2025-01-05T14:42:27Z by kres fcff05e.
 
 name: integration-qemu-csi-longhorn-cron
 concurrency:
@@ -109,6 +109,7 @@ jobs:
           EXTRA_TEST_ARGS: -talos.csi=longhorn
           GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
           IMAGE_REGISTRY: registry.dev.siderolabs.io
+          QEMU_MEMORY_WORKERS: "3072"
           QEMU_WORKERS: "3"
           SHORT_INTEGRATION_TEST: "yes"
           WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@hack/test/patches/longhorn.yaml'
diff --git a/.kres.yaml b/.kres.yaml
index b72d6316f81..579d874f933 100644
--- a/.kres.yaml
+++ b/.kres.yaml
@@ -1367,6 +1367,7 @@ spec:
         - '30 3 * * *'
       triggerLabels:
         - integration/qemu-csi
+        - integration/extensions # since iSCSI is tested with Longhorn
         - integration/qemu-csi-longhorn
       steps:
         - name: download-artifacts
@@ -1422,6 +1423,7 @@ spec:
             GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
             SHORT_INTEGRATION_TEST: yes
             QEMU_WORKERS: 3
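+            # give the worker VMs extra memory for the Longhorn workload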
+            QEMU_MEMORY_WORKERS: 3072
             WITH_CONFIG_PATCH: "@_out/installer-extensions-patch.yaml:@hack/test/patches/longhorn.yaml"
             EXTRA_TEST_ARGS: -talos.csi=longhorn
             IMAGE_REGISTRY: registry.dev.siderolabs.io
diff --git a/internal/integration/api/extensions_qemu.go b/internal/integration/api/extensions_qemu.go
index d13f57640b1..4b9ba7b5976 100644
--- a/internal/integration/api/extensions_qemu.go
+++ b/internal/integration/api/extensions_qemu.go
@@ -133,140 +133,6 @@ func (suite *ExtensionsSuiteQEMU) TestExtensionsExpectedModules() {
 	suite.AssertExpectedModules(suite.ctx, node, expectedModulesModDep)
 }
 
-// TestExtensionsISCSI verifies expected services are running.
-func (suite *ExtensionsSuiteQEMU) TestExtensionsISCSI() {
-	expectedServices := map[string]string{
-		"ext-iscsid": "Running",
-		"ext-tgtd":   "Running",
-	}
-
-	node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
-	suite.AssertServicesRunning(suite.ctx, node, expectedServices)
-
-	ctx := client.WithNode(suite.ctx, node)
-
-	iscsiCreatePodDef, err := suite.NewPrivilegedPod("iscsi-create")
-	suite.Require().NoError(err)
-
-	suite.Require().NoError(iscsiCreatePodDef.Create(suite.ctx, 5*time.Minute))
-
-	defer iscsiCreatePodDef.Delete(suite.ctx) //nolint:errcheck
-
-	reader, err := suite.Client.Read(ctx, "/system/iscsi/initiatorname.iscsi")
-	suite.Require().NoError(err)
-
-	defer reader.Close() //nolint:errcheck
-
-	body, err := io.ReadAll(reader)
-	suite.Require().NoError(err)
-
-	initiatorName := strings.TrimPrefix(strings.TrimSpace(string(body)), "InitiatorName=")
-
-	stdout, stderr, err := iscsiCreatePodDef.Exec(
-		suite.ctx,
-		fmt.Sprintf("nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op new --mode target --tid 1 -T %s", initiatorName),
-	)
-	suite.Require().NoError(err)
-
-	suite.Require().Equal("", stderr)
-	suite.Require().Equal("", stdout)
-
-	stdout, stderr, err = iscsiCreatePodDef.Exec(
-		suite.ctx,
-		"dd if=/dev/zero of=/proc/$(pgrep tgtd)/root/var/run/tgtd/iscsi.disk bs=1M count=100",
-	)
-	suite.Require().NoError(err)
-
-	suite.Require().Contains(stderr, "100+0 records in\n100+0 records out\n")
-	suite.Require().Equal("", stdout)
-
-	stdout, stderr, err = iscsiCreatePodDef.Exec(
-		suite.ctx,
-		"nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun 1 -b /var/run/tgtd/iscsi.disk",
-	)
-	suite.Require().NoError(err)
-
-	suite.Require().Equal("", stderr)
-	suite.Require().Equal("", stdout)
-
-	stdout, stderr, err = iscsiCreatePodDef.Exec(
-		suite.ctx,
-		"nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL",
-	)
-	suite.Require().NoError(err)
-
-	suite.Require().Equal("", stderr)
-	suite.Require().Equal("", stdout)
-
-	stdout, stderr, err = iscsiCreatePodDef.Exec(
-		suite.ctx,
-		fmt.Sprintf("nsenter --mount=/proc/$(pgrep iscsid)/ns/mnt --net=/proc/$(pgrep iscsid)/ns/net -- iscsiadm --mode discovery --type sendtargets --portal %s:3260", node),
-	)
-	suite.Require().NoError(err)
-
-	suite.Require().Equal("", stderr)
-	suite.Require().Equal(fmt.Sprintf("%s:3260,1 %s\n", node, initiatorName), stdout)
-
-	stdout, stderr, err = iscsiCreatePodDef.Exec(
-		suite.ctx,
-		fmt.Sprintf("nsenter --mount=/proc/$(pgrep iscsid)/ns/mnt --net=/proc/$(pgrep iscsid)/ns/net -- iscsiadm --mode node --targetname %s --portal %s:3260 --login", initiatorName, node),
-	)
-	suite.Require().NoError(err)
-
-	suite.Require().Equal("", stderr)
-	suite.Require().Contains(stdout, "successful.")
-
-	defer func() {
-		stdout, stderr, err = iscsiCreatePodDef.Exec(
-			suite.ctx,
-			fmt.Sprintf("nsenter --mount=/proc/$(pgrep iscsid)/ns/mnt --net=/proc/$(pgrep iscsid)/ns/net -- iscsiadm --mode node --targetname %s --portal %s:3260 --logout", initiatorName, node),
-		)
-		suite.Require().NoError(err)
-
-		suite.Require().Equal("", stderr)
-
-		stdout, stderr, err = iscsiCreatePodDef.Exec(
-			suite.ctx,
-			"nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op delete --mode logicalunit --tid 1 --lun 1",
-		)
-		suite.Require().NoError(err)
-
-		suite.Require().Equal("", stderr)
-		suite.Require().Equal("", stdout)
-
-		stdout, stderr, err = iscsiCreatePodDef.Exec(
-			suite.ctx,
-			"nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op delete --mode target --tid 1",
-		)
-
-		suite.Require().NoError(err)
-
-		suite.Require().Equal("", stderr)
-		suite.Require().Equal("", stdout)
-	}()
-
-	suite.Eventually(func() bool {
-		return suite.iscsiTargetExists()
-	}, 5*time.Second, 1*time.Second, "expected iscsi target to exist")
-}
-
-func (suite *ExtensionsSuiteQEMU) iscsiTargetExists() bool {
-	node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
-
-	ctx := client.WithNode(suite.ctx, node)
-
-	disks, err := safe.ReaderListAll[*block.Disk](ctx, suite.Client.COSI)
-	suite.Require().NoError(err)
-
-	for disk := range disks.All() {
-		if disk.TypedSpec().Transport == "iscsi" {
-			return true
-		}
-	}
-
-	return false
-}
-
 // TestExtensionsNutClient verifies nut client is working.
 func (suite *ExtensionsSuiteQEMU) TestExtensionsNutClient() {
 	node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
diff --git a/internal/integration/base/k8s.go b/internal/integration/base/k8s.go
index a984d86db44..f9e7ff80cfd 100644
--- a/internal/integration/base/k8s.go
+++ b/internal/integration/base/k8s.go
@@ -635,6 +635,26 @@ func (k8sSuite *K8sSuite) WaitForResource(ctx context.Context, namespace, group,
 	return nil
 }
 
+// GetUnstructuredResource gets the unstructured resource with the given namespace, group, kind, version and name.
+func (k8sSuite *K8sSuite) GetUnstructuredResource(ctx context.Context, namespace, group, kind, version, resourceName string) (*unstructured.Unstructured, error) {
+	mapping, err := k8sSuite.Mapper.RESTMapping(schema.GroupKind{
+		Group: group,
+		Kind:  kind,
+	}, version)
+	if err != nil {
+		return nil, fmt.Errorf("error creating mapping for resource %s/%s/%s", group, kind, version)
+	}
+
+	dr := k8sSuite.DynamicClient.Resource(mapping.Resource).Namespace(namespace)
+
+	result, err := dr.Get(ctx, resourceName, metav1.GetOptions{})
+	if err != nil {
+		return nil, fmt.Errorf("error getting resource %s/%s/%s/%s: %v", group, version, kind, resourceName, err)
+	}
+
+	return result, nil
+}
+
 // RunFIOTest runs the FIO test with the given storage class and size using kubestr.
 func (k8sSuite *K8sSuite) RunFIOTest(ctx context.Context, storageClasss, size string) error {
 	args := []string{
@@ -788,7 +808,7 @@ func (k8sSuite *K8sSuite) DeleteManifests(ctx context.Context, manifests []unstr
 			return event.Type == watch.Deleted, nil
 		})
 
-		k8sSuite.Require().NoError(err, "error waiting for the object to be deleted %s", obj.GetName())
+		k8sSuite.Require().NoError(err, "error waiting for the object to be deleted %s/%s/%s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName())
 
 		k8sSuite.T().Logf("deleted object %s/%s/%s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName())
 	}
diff --git a/internal/integration/k8s/longhorn.go b/internal/integration/k8s/longhorn.go
index 91287ba4b5c..388786ce9ef 100644
--- a/internal/integration/k8s/longhorn.go
+++ b/internal/integration/k8s/longhorn.go
@@ -7,10 +7,27 @@
 package k8s
 
 import (
+	"bytes"
 	"context"
+	_ "embed"
+	"strings"
+	"testing"
+	"text/template"
 	"time"
 
 	"github.com/siderolabs/talos/internal/integration/base"
+	"github.com/siderolabs/talos/pkg/machinery/config/machine"
+)
+
+var (
+	//go:embed testdata/longhorn-iscsi-volume.yaml
+	longHornISCSIVolumeManifest []byte
+
+	//go:embed testdata/longhorn-volumeattachment.yaml
+	longHornISCSIVolumeAttachmentManifestTemplate []byte
+
+	//go:embed testdata/pod-iscsi-volume.yaml
+	podWithISCSIVolumeTemplate []byte
 )
 
 // LongHornSuite tests deploying Longhorn.
@@ -24,6 +41,8 @@ func (suite *LongHornSuite) SuiteName() string {
 }
 
 // TestDeploy tests deploying Longhorn and running a simple test.
+//
+//nolint:gocyclo
 func (suite *LongHornSuite) TestDeploy() {
 	if suite.Cluster == nil {
 		suite.T().Skip("without full cluster state reaching out to the node IP is not reliable")
@@ -53,7 +72,117 @@ func (suite *LongHornSuite) TestDeploy() {
 		suite.T().Fatalf("failed to install Longhorn chart: %v", err)
 	}
 
-	suite.Require().NoError(suite.RunFIOTest(ctx, "longhorn", "10G"))
+	suite.T().Run("fio", func(t *testing.T) {
+		t.Parallel()
+
+		suite.Require().NoError(suite.RunFIOTest(ctx, "longhorn", "10G"))
+	})
+
+	suite.T().Run("iscsi", func(t *testing.T) {
+		t.Parallel()
+
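+		// create a Longhorn Volume named "iscsi" with the iSCSI frontend from the embedded manifest.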
+		longHornISCSIVolumeManifestUnstructured := suite.ParseManifests(longHornISCSIVolumeManifest)
+
+		defer func() {
+			cleanUpCtx, cleanupCancel := context.WithTimeout(context.Background(), 2*time.Minute)
+			defer cleanupCancel()
+
+			suite.DeleteManifests(cleanUpCtx, longHornISCSIVolumeManifestUnstructured)
+		}()
+
+		suite.ApplyManifests(ctx, longHornISCSIVolumeManifestUnstructured)
+
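+		// render and apply a Longhorn VolumeAttachment targeting a random worker node so the volume gets attached there.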
+		tmpl, err := template.New("longhorn-iscsi-volumeattachment").Parse(string(longHornISCSIVolumeAttachmentManifestTemplate))
+		suite.Require().NoError(err)
+
+		var longHornISCSIVolumeAttachmentManifest bytes.Buffer
+
+		node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
+
+		nodeInfo, err := suite.GetK8sNodeByInternalIP(ctx, node)
+		if err != nil {
+			suite.T().Fatalf("failed to get K8s node by internal IP: %v", err)
+		}
+
+		if err := tmpl.Execute(&longHornISCSIVolumeAttachmentManifest, struct {
+			NodeID string
+		}{
+			NodeID: nodeInfo.Name,
+		}); err != nil {
+			suite.T().Fatalf("failed to render Longhorn ISCSI volume manifest: %v", err)
+		}
+
+		longHornISCSIVolumeAttachmentManifestUnstructured := suite.ParseManifests(longHornISCSIVolumeAttachmentManifest.Bytes())
+
+		suite.ApplyManifests(ctx, longHornISCSIVolumeAttachmentManifestUnstructured)
+
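+		// wait for the volume to become healthy and attached, and for its engine to be running.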
+		if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Volume", "v1beta2", "iscsi", "{.status.robustness}", "healthy"); err != nil {
+			suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
+		}
+
+		if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Volume", "v1beta2", "iscsi", "{.status.state}", "attached"); err != nil {
+			suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
+		}
+
+		if err := suite.WaitForResource(ctx, "longhorn-system", "longhorn.io", "Engine", "v1beta2", "iscsi-e-0", "{.status.currentState}", "running"); err != nil {
+			suite.T().Fatalf("failed to wait for LongHorn Engine to be Ready: %v", err)
+		}
+
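+		// the Engine status carries the iSCSI endpoint (target portal and IQN) exposed by Longhorn.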
+		unstructured, err := suite.GetUnstructuredResource(ctx, "longhorn-system", "longhorn.io", "Engine", "v1beta2", "iscsi-e-0")
+		if err != nil {
+			suite.T().Fatalf("failed to get LongHorn Engine resource: %v", err)
+		}
+
+		status, ok := unstructured.Object["status"].(map[string]interface{})
+		if !ok {
+			suite.T().Fatalf("failed to get Longhorn Engine status")
+		}
+
+		endpointData, ok := status["endpoint"].(string)
+		if !ok {
+			suite.T().Fatalf("failed to get Longhorn Engine endpoint")
+		}
+
+		tmpl, err = template.New("pod-iscsi-volume").Parse(string(podWithISCSIVolumeTemplate))
+		suite.Require().NoError(err)
+
+		// endpoint is of the form `iscsi://10.244.0.5:3260/iqn.2019-10.io.longhorn:iscsi/1`
+		// trim the iscsi:// prefix
+		endpointData = strings.TrimPrefix(endpointData, "iscsi://")
+		// trim the /1 suffix
+		endpointData = strings.TrimSuffix(endpointData, "/1")
+
+		targetPortal, IQN, ok := strings.Cut(endpointData, "/")
+		if !ok {
+			suite.T().Fatalf("failed to parse endpoint data from %s", endpointData)
+		}
+
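+		// render a pod that mounts the target directly via the in-tree iSCSI volume plugin, pinned to the same node.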
+		var podWithISCSIVolume bytes.Buffer
+
+		if err := tmpl.Execute(&podWithISCSIVolume, struct {
+			NodeName     string
+			TargetPortal string
+			IQN          string
+		}{
+			NodeName:     nodeInfo.Name,
+			TargetPortal: targetPortal,
+			IQN:          IQN,
+		}); err != nil {
+			suite.T().Fatalf("failed to render pod with ISCSI volume manifest: %v", err)
+		}
+
+		podWithISCSIVolumeUnstructured := suite.ParseManifests(podWithISCSIVolume.Bytes())
+
+		defer func() {
+			cleanUpCtx, cleanupCancel := context.WithTimeout(context.Background(), time.Minute)
+			defer cleanupCancel()
+
+			suite.DeleteManifests(cleanUpCtx, podWithISCSIVolumeUnstructured)
+		}()
+
+		suite.ApplyManifests(ctx, podWithISCSIVolumeUnstructured)
+
+		suite.WaitForPodToBeRunning(ctx, 3*time.Minute, "default", "iscsipd")
+	})
 }
 
 func init() {
diff --git a/internal/integration/k8s/testdata/longhorn-iscsi-volume.yaml b/internal/integration/k8s/testdata/longhorn-iscsi-volume.yaml
new file mode 100644
index 00000000000..02d96ab73c4
--- /dev/null
+++ b/internal/integration/k8s/testdata/longhorn-iscsi-volume.yaml
@@ -0,0 +1,12 @@
+---
+apiVersion: longhorn.io/v1beta2
+kind: Volume
+metadata:
+  labels:
+    longhornvolume: iscsi
+  name: iscsi
+  namespace: longhorn-system
+spec:
+  frontend: iscsi
+  numberOfReplicas: 1
+  size: "1073741824"
diff --git a/internal/integration/k8s/testdata/longhorn-volumeattachment.yaml b/internal/integration/k8s/testdata/longhorn-volumeattachment.yaml
new file mode 100644
index 00000000000..789b5ef7dd2
--- /dev/null
+++ b/internal/integration/k8s/testdata/longhorn-volumeattachment.yaml
@@ -0,0 +1,19 @@
+---
+apiVersion: longhorn.io/v1beta2
+kind: VolumeAttachment
+metadata:
+  labels:
+    longhornvolume: iscsi
+  name: iscsi
+  namespace: longhorn-system
+spec:
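+  # manually attach the "iscsi" volume to the templated node via an attachment ticket.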
+  attachmentTickets:
+    longhorn-ui:
+      generation: 0
+      id: longhorn-ui
+      nodeID: {{ .NodeID }}
+      parameters:
+        disableFrontend: "false"
+        lastAttachedBy: ""
+      type: longhorn-api
+  volume: iscsi
diff --git a/internal/integration/k8s/testdata/pod-iscsi-volume.yaml b/internal/integration/k8s/testdata/pod-iscsi-volume.yaml
new file mode 100644
index 00000000000..c1748c9fa34
--- /dev/null
+++ b/internal/integration/k8s/testdata/pod-iscsi-volume.yaml
@@ -0,0 +1,24 @@
+---
+apiVersion: v1
+kind: Pod
+metadata:
+  name: iscsipd
+  namespace: default
+spec:
+  containers:
+  - name: iscsipd-rw
+    image: alpine
+    command: ["/bin/sh", "-c", "--"]
+    args: ["trap : TERM INT; (sleep 1000) & wait"]
+    volumeMounts:
+    - mountPath: "/mnt/iscsipd"
+      name: iscsipd-rw
+  nodeName: {{ .NodeName }}
+  volumes:
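+  # mount the Longhorn target via the in-tree iSCSI volume plugin, using the portal and IQN rendered from the Engine status.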
+  - name: iscsipd-rw
+    iscsi:
+      targetPortal: {{ .TargetPortal }}
+      iqn: {{ .IQN }}
+      lun: 1
+      fsType: ext4
+      readOnly: false