Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

chore(ci): rework iscsi-tools extensions test #10087

Merged
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 7 additions & 3 deletions .github/workflows/ci.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-01-16T14:10:07Z by kres 3b3f992.
# Generated on 2025-01-20T16:57:17Z by kres 3b3f992.

name: default
concurrency:
Expand Down Expand Up @@ -2848,7 +2848,7 @@ jobs:
runs-on:
- self-hosted
- talos
if: contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi') || contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi-longhorn')
if: contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi') || contains(fromJSON(needs.default.outputs.labels), 'integration/extensions') || contains(fromJSON(needs.default.outputs.labels), 'integration/qemu-csi-longhorn')
needs:
- default
steps:
Expand Down Expand Up @@ -2941,14 +2941,18 @@ jobs:
IMAGE_REGISTRY: registry.dev.siderolabs.io
run: |
make installer-with-extensions
- name: kubelet-fat-patch
run: |
make kubelet-fat-patch
- name: e2e-qemu-csi-longhorn
env:
EXTRA_TEST_ARGS: -talos.csi=longhorn
GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_MEMORY_WORKERS: "3072"
QEMU_WORKERS: "3"
SHORT_INTEGRATION_TEST: "yes"
WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@hack/test/patches/longhorn.yaml'
WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts
Expand Down
8 changes: 6 additions & 2 deletions .github/workflows/integration-qemu-csi-longhorn-cron.yaml
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
# THIS FILE WAS AUTOMATICALLY GENERATED, PLEASE DO NOT EDIT.
#
# Generated on 2025-01-16T11:00:37Z by kres 3b3f992.
# Generated on 2025-01-20T16:57:17Z by kres 3b3f992.

name: integration-qemu-csi-longhorn-cron
concurrency:
Expand Down Expand Up @@ -104,14 +104,18 @@ jobs:
IMAGE_REGISTRY: registry.dev.siderolabs.io
run: |
make installer-with-extensions
- name: kubelet-fat-patch
run: |
make kubelet-fat-patch
- name: e2e-qemu-csi-longhorn
env:
EXTRA_TEST_ARGS: -talos.csi=longhorn
GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
IMAGE_REGISTRY: registry.dev.siderolabs.io
QEMU_MEMORY_WORKERS: "3072"
QEMU_WORKERS: "3"
SHORT_INTEGRATION_TEST: "yes"
WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@hack/test/patches/longhorn.yaml'
WITH_CONFIG_PATCH: '@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml'
run: |
sudo -E make e2e-qemu
- name: save artifacts
Expand Down
5 changes: 4 additions & 1 deletion .kres.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -1369,6 +1369,7 @@ spec:
- '30 3 * * *'
triggerLabels:
- integration/qemu-csi
- integration/extensions # since iscsi is tested with longhorn
- integration/qemu-csi-longhorn
steps:
- name: download-artifacts
Expand Down Expand Up @@ -1417,14 +1418,16 @@ spec:
environment:
EXTENSIONS_FILTER_COMMAND: "grep -E 'iscsi-tools|util-linux-tools'"
IMAGE_REGISTRY: registry.dev.siderolabs.io
- name: kubelet-fat-patch
- name: e2e-qemu-csi-longhorn
command: e2e-qemu
withSudo: true
environment:
GITHUB_STEP_NAME: ${{ github.job}}-e2e-qemu-csi-longhorn
SHORT_INTEGRATION_TEST: yes
QEMU_WORKERS: 3
WITH_CONFIG_PATCH: "@_out/installer-extensions-patch.yaml:@hack/test/patches/longhorn.yaml"
QEMU_MEMORY_WORKERS: 3072
WITH_CONFIG_PATCH: "@_out/installer-extensions-patch.yaml:@_out/kubelet-fat-patch.yaml:@hack/test/patches/longhorn.yaml"
EXTRA_TEST_ARGS: -talos.csi=longhorn
IMAGE_REGISTRY: registry.dev.siderolabs.io
- name: save-talos-logs
Expand Down
3 changes: 3 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -608,6 +608,9 @@ installer-with-extensions: $(ARTIFACTS)/extensions/_out/extensions-metadata
crane push $(ARTIFACTS)/installer-amd64.tar $(REGISTRY_AND_USERNAME)/installer:$(IMAGE_TAG)-amd64-extensions
INSTALLER_IMAGE_EXTENSIONS="$(REGISTRY_AND_USERNAME)/installer:$(IMAGE_TAG)-amd64-extensions" yq eval -n '.machine.install.image = strenv(INSTALLER_IMAGE_EXTENSIONS)' > $(ARTIFACTS)/installer-extensions-patch.yaml

# kubelet-fat-patch: write a machine-config patch ($(ARTIFACTS)/kubelet-fat-patch.yaml)
# that points the kubelet image at the "-fat" variant for the current KUBECTL_VERSION;
# the e2e jobs feed this file to the cluster via WITH_CONFIG_PATCH.
kubelet-fat-patch:
K8S_VERSION=$(KUBECTL_VERSION) yq eval -n '.machine.kubelet.image = "ghcr.io/siderolabs/kubelet:" + strenv(K8S_VERSION) + "-fat"' > $(ARTIFACTS)/kubelet-fat-patch.yaml

# Assets for releases

.PHONY: $(ARTIFACTS)/$(TALOS_RELEASE)
Expand Down
134 changes: 0 additions & 134 deletions internal/integration/api/extensions_qemu.go
Original file line number Diff line number Diff line change
Expand Up @@ -133,140 +133,6 @@ func (suite *ExtensionsSuiteQEMU) TestExtensionsExpectedModules() {
suite.AssertExpectedModules(suite.ctx, node, expectedModulesModDep)
}

// TestExtensionsISCSI verifies that the iscsi-tools extension services are running
// and exercises a full tgtd/iscsid round trip on a worker node: create a target,
// back it with a file, expose a LUN, bind it, discover it, log in, and finally
// assert the initiator sees an iSCSI-transport disk. Cleanup (logout + target
// teardown) is deferred so it runs even if the final assertion fails.
func (suite *ExtensionsSuiteQEMU) TestExtensionsISCSI() {
// Both the target daemon (tgtd) and the initiator daemon (iscsid) must be up.
expectedServices := map[string]string{
"ext-iscsid": "Running",
"ext-tgtd": "Running",
}

node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
suite.AssertServicesRunning(suite.ctx, node, expectedServices)

ctx := client.WithNode(suite.ctx, node)

// Privileged pod used to drive tgtadm/iscsiadm inside the host namespaces.
iscsiCreatePodDef, err := suite.NewPrivilegedPod("iscsi-create")
suite.Require().NoError(err)

suite.Require().NoError(iscsiCreatePodDef.Create(suite.ctx, 5*time.Minute))

defer iscsiCreatePodDef.Delete(suite.ctx) //nolint:errcheck

// The node's IQN doubles as the target name below.
reader, err := suite.Client.Read(ctx, "/system/iscsi/initiatorname.iscsi")
suite.Require().NoError(err)

defer reader.Close() //nolint:errcheck

body, err := io.ReadAll(reader)
suite.Require().NoError(err)

// File content is "InitiatorName=<iqn>"; strip the prefix to get the bare IQN.
initiatorName := strings.TrimPrefix(strings.TrimSpace(string(body)), "InitiatorName=")

// Step 1: create iSCSI target #1 in the host mount namespace.
stdout, stderr, err := iscsiCreatePodDef.Exec(
suite.ctx,
fmt.Sprintf("nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op new --mode target --tid 1 -T %s", initiatorName),
)
suite.Require().NoError(err)

suite.Require().Equal("", stderr)
suite.Require().Equal("", stdout)

// Step 2: create a 100 MiB backing file inside tgtd's root filesystem
// (reached via /proc/<pid>/root so the path matches what tgtd sees).
stdout, stderr, err = iscsiCreatePodDef.Exec(
suite.ctx,
"dd if=/dev/zero of=/proc/$(pgrep tgtd)/root/var/run/tgtd/iscsi.disk bs=1M count=100",
)
suite.Require().NoError(err)

// dd reports its record counts on stderr; that output is expected here.
suite.Require().Contains(stderr, "100+0 records in\n100+0 records out\n")
suite.Require().Equal("", stdout)

// Step 3: attach the backing file as LUN 1 of target 1.
stdout, stderr, err = iscsiCreatePodDef.Exec(
suite.ctx,
"nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op new --mode logicalunit --tid 1 --lun 1 -b /var/run/tgtd/iscsi.disk",
)
suite.Require().NoError(err)

suite.Require().Equal("", stderr)
suite.Require().Equal("", stdout)

// Step 4: allow any initiator to bind to the target.
stdout, stderr, err = iscsiCreatePodDef.Exec(
suite.ctx,
"nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op bind --mode target --tid 1 -I ALL",
)
suite.Require().NoError(err)

suite.Require().Equal("", stderr)
suite.Require().Equal("", stdout)

// Step 5: discover the target via sendtargets from within iscsid's
// mount+net namespaces; expect exactly our target on port 3260.
stdout, stderr, err = iscsiCreatePodDef.Exec(
suite.ctx,
fmt.Sprintf("nsenter --mount=/proc/$(pgrep iscsid)/ns/mnt --net=/proc/$(pgrep iscsid)/ns/net -- iscsiadm --mode discovery --type sendtargets --portal %s:3260", node),
)
suite.Require().NoError(err)

suite.Require().Equal("", stderr)
suite.Require().Equal(fmt.Sprintf("%s:3260,1 %s\n", node, initiatorName), stdout)

// Step 6: log in to the discovered target.
stdout, stderr, err = iscsiCreatePodDef.Exec(
suite.ctx,
fmt.Sprintf("nsenter --mount=/proc/$(pgrep iscsid)/ns/mnt --net=/proc/$(pgrep iscsid)/ns/net -- iscsiadm --mode node --targetname %s --portal %s:3260 --login", initiatorName, node),
)
suite.Require().NoError(err)

suite.Require().Equal("", stderr)
suite.Require().Contains(stdout, "successful.")

// Deferred teardown: logout, then delete the LUN and the target so the
// node is left clean regardless of the assertion below.
defer func() {
stdout, stderr, err = iscsiCreatePodDef.Exec(
suite.ctx,
fmt.Sprintf("nsenter --mount=/proc/$(pgrep iscsid)/ns/mnt --net=/proc/$(pgrep iscsid)/ns/net -- iscsiadm --mode node --targetname %s --portal %s:3260 --logout", initiatorName, node),
)
suite.Require().NoError(err)

suite.Require().Equal("", stderr)

stdout, stderr, err = iscsiCreatePodDef.Exec(
suite.ctx,
"nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op delete --mode logicalunit --tid 1 --lun 1",
)
suite.Require().NoError(err)

suite.Require().Equal("", stderr)
suite.Require().Equal("", stdout)

stdout, stderr, err = iscsiCreatePodDef.Exec(
suite.ctx,
"nsenter --mount=/proc/1/ns/mnt -- tgtadm --lld iscsi --op delete --mode target --tid 1",
)

suite.Require().NoError(err)

suite.Require().Equal("", stderr)
suite.Require().Equal("", stdout)
}()

// Final check: the logged-in session should surface as an iSCSI disk;
// poll briefly since device discovery is asynchronous.
suite.Eventually(func() bool {
return suite.iscsiTargetExists()
}, 5*time.Second, 1*time.Second, "expected iscsi target to exist")
}

// iscsiTargetExists reports whether any disk with the "iscsi" transport is
// visible on a (random) worker node, i.e. whether an iSCSI login produced a
// block device.
func (suite *ExtensionsSuiteQEMU) iscsiTargetExists() bool {
	workerNode := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)

	nodeCtx := client.WithNode(suite.ctx, workerNode)

	diskList, err := safe.ReaderListAll[*block.Disk](nodeCtx, suite.Client.COSI)
	suite.Require().NoError(err)

	found := false

	// Scan all discovered disks for one exposed over the iSCSI transport.
	for d := range diskList.All() {
		if d.TypedSpec().Transport == "iscsi" {
			found = true

			break
		}
	}

	return found
}

// TestExtensionsNutClient verifies nut client is working.
func (suite *ExtensionsSuiteQEMU) TestExtensionsNutClient() {
node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker)
Expand Down
26 changes: 24 additions & 2 deletions internal/integration/base/k8s.go
Original file line number Diff line number Diff line change
Expand Up @@ -636,6 +636,26 @@ func (k8sSuite *K8sSuite) WaitForResource(ctx context.Context, namespace, group,
return nil
}

// GetUnstructuredResource gets the unstructured resource with the given namespace, group, kind, version and name.
func (k8sSuite *K8sSuite) GetUnstructuredResource(ctx context.Context, namespace, group, kind, version, resourceName string) (*unstructured.Unstructured, error) {
	// Resolve GroupKind+version to a REST mapping to obtain the canonical
	// GroupVersionResource for the dynamic client.
	mapping, err := k8sSuite.Mapper.RESTMapping(schema.GroupKind{
		Group: group,
		Kind:  kind,
	}, version)
	if err != nil {
		// wrap with %w so the underlying mapping failure stays inspectable
		// (the original dropped err entirely here)
		return nil, fmt.Errorf("error creating mapping for resource %s/%s/%s: %w", group, kind, version, err)
	}

	dr := k8sSuite.DynamicClient.Resource(mapping.Resource).Namespace(namespace)

	result, err := dr.Get(ctx, resourceName, metav1.GetOptions{})
	if err != nil {
		// %w (not %v) keeps the chain usable with errors.Is/errors.As,
		// e.g. apierrors.IsNotFound via errors.As
		return nil, fmt.Errorf("error getting resource %s/%s/%s/%s: %w", group, version, kind, resourceName, err)
	}

	return result, nil
}

// RunFIOTest runs the FIO test with the given storage class and size using kubestr.
func (k8sSuite *K8sSuite) RunFIOTest(ctx context.Context, storageClasss, size string) error {
args := []string{
Expand Down Expand Up @@ -730,7 +750,9 @@ func (k8sSuite *K8sSuite) ApplyManifests(ctx context.Context, manifests []unstru

dr := k8sSuite.DynamicClient.Resource(mapping.Resource).Namespace(obj.GetNamespace())

_, err = dr.Create(ctx, &obj, metav1.CreateOptions{})
_, err = dr.Apply(ctx, obj.GetName(), &obj, metav1.ApplyOptions{
FieldManager: "talos-e2e",
})
k8sSuite.Require().NoError(err, "error creating object %s", obj.GetName())

k8sSuite.T().Logf("created object %s/%s/%s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName())
Expand Down Expand Up @@ -793,7 +815,7 @@ func (k8sSuite *K8sSuite) DeleteManifests(ctx context.Context, manifests []unstr
return event.Type == watch.Deleted, nil
})

k8sSuite.Require().NoError(err, "error waiting for the object to be deleted %s", obj.GetName())
k8sSuite.Require().NoError(err, "error waiting for the object to be deleted %s/%s/%s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName())

k8sSuite.T().Logf("deleted object %s/%s/%s", obj.GetObjectKind().GroupVersionKind(), obj.GetNamespace(), obj.GetName())
}
Expand Down
Loading
Loading