diff --git a/api_client/apiclient.go b/api_client/apiclient.go new file mode 100644 index 00000000..53e29683 --- /dev/null +++ b/api_client/apiclient.go @@ -0,0 +1,79 @@ +package apiclient + +import ( + "context" + "strings" + "time" + + "github.com/storageos/kubectl-storageos/api_client/openapi" + "github.com/storageos/kubectl-storageos/pkg/utils" + "github.com/storageos/kubectl-storageos/pkg/version" + "k8s.io/client-go/rest" +) + +// Client wraps the openAPI client providing a collection of useful methods such +// as GetEntityByName, making use of the underlying client endpoints and +// extending their logic with filters, objects transformations, etc. +// +// It also decorates the underlying openAPI Authenticate() method with cached +// auth capabilities, reusing an in-memory token (session) if it exists. If that +// fails, it will continue as normal and then cache the new token. +// This cache is used until Authenticate is called again (manually or when the +// reauth wrapper is triggered). +type client struct { + *openapi.OpenAPI + + cacheSessionToken string + cacheSessionExpiresAt time.Time +} + +func GetAPIClient(restConfig *rest.Config) (*ClientWithReauth, error) { + userAgent := strings.Join([]string{"ondat kubectl plugin", version.PluginVersion}, "/") + + endpoint, err := utils.GetFirstStorageOSAPIEndpoint(restConfig) + if err != nil { + return nil, err + } + + basicOpenAPIClient, err := openapi.NewOpenAPI([]string{endpoint}, userAgent) + if err != nil { + return nil, err + } + + // client wrapper adding some extra useful methods on top + internalClient := client{ + OpenAPI: basicOpenAPIClient, + } + + username, password, err := utils.GetAPICredentialsFromSecret(restConfig) + if err != nil { + return nil, err + } + + // client wrapper adding reauth on first failure + clientWithReauth := NewClientWithReauth(internalClient, username, password) + + return clientWithReauth, nil +} + +func (c *client) Authenticate(ctx context.Context, username, password string) (string, time.Time, error) { + // TODO make configurable if we want to skip this feature of caching the session + if c.cacheSessionToken != "" && c.cacheSessionExpiresAt.After(time.Now()) { + err := c.UseAuthSession(ctx, c.cacheSessionToken) + if err == nil { + return c.cacheSessionToken, c.cacheSessionExpiresAt, nil + } + // failed to setup auth with in-memory session, proceed to create a new one + } + + sessionToken, expiresBy, err := c.OpenAPI.Authenticate(ctx, username, password) + if err != nil { + return "", time.Now().Add(-time.Minute), err + } + + // store new session in-memory + c.cacheSessionToken = sessionToken + c.cacheSessionExpiresAt = expiresBy + + return sessionToken, expiresBy, nil +} diff --git a/api_client/diagnostics.go b/api_client/diagnostics.go new file mode 100644 index 00000000..3523889b --- /dev/null +++ b/api_client/diagnostics.go @@ -0,0 +1,15 @@ +package apiclient + +import ( + "context" + "io" +) + +func (c *client) GetSingleNodeDiagnosticsByName(ctx context.Context, name string) (io.ReadCloser, string, error) { + node, err := c.GetNodeByName(ctx, name) + if err != nil { + return nil, "", err + } + + return c.GetSingleNodeDiagnostics(ctx, node.ID) +} diff --git a/api_client/licence.go b/api_client/licence.go new file mode 100644 index 00000000..4de85674 --- /dev/null +++ b/api_client/licence.go @@ -0,0 +1,26 @@ +package apiclient + +import ( + "context" + + "github.com/storageos/kubectl-storageos/api_client/openapi" + "github.com/storageos/kubectl-storageos/model" +) + +// UpdateLicence sends a new 
version of the licence to apply to the current
+// cluster. It returns the new licence resource if correctly applied. It doesn't
+// require a version; if none is given, it overwrites the licence using the
+// version read from the current licence.
+func (c *client) UpdateLicence(ctx context.Context, licence []byte, params *openapi.UpdateLicenceRequestParams) (*model.License, error) {
+	if params == nil || params.CASVersion == "" {
+		l, err := c.GetLicence(ctx)
+		if err != nil {
+			return nil, err
+		}
+		params = &openapi.UpdateLicenceRequestParams{
+			CASVersion: l.Version,
+		}
+	}
+
+	return c.OpenAPI.UpdateLicence(ctx, licence, params)
+}
diff --git a/api_client/namespace.go b/api_client/namespace.go
new file mode 100644
index 00000000..91c82046
--- /dev/null
+++ b/api_client/namespace.go
@@ -0,0 +1,122 @@
+package apiclient
+
+import (
+	"context"
+
+	"github.com/storageos/kubectl-storageos/api_client/openapi"
+	"github.com/storageos/kubectl-storageos/model"
+)
+
+// GetNamespaceByName requests basic information for the namespace resource
+// which has the given name.
+//
+// The resource model for the API is built around using unique identifiers,
+// so this operation is inherently more expensive than the corresponding
+// GetNamespace() operation.
+//
+// Retrieving a namespace resource by name involves requesting a list of all
+// namespaces from the StorageOS API and returning the first one where the
+// name matches.
+func (c *client) GetNamespaceByName(ctx context.Context, name string) (*model.Namespace, error) {
+	namespaces, err := c.ListNamespaces(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	for _, ns := range namespaces {
+		if ns.Name == name {
+			return ns, nil
+		}
+	}
+
+	return nil, openapi.NewNamespaceNameNotFoundError(name)
+}
+
+// GetListNamespacesByUID requests a list of namespace resources present in the
+// cluster.
+//
+// The returned list is filtered using uids so that it contains only those
+// namespace resources which have a matching ID. If no uids are given then
+// all namespaces are returned.
+func (c *client) GetListNamespacesByUID(ctx context.Context, uids ...string) ([]*model.Namespace, error) {
+	resources, err := c.ListNamespaces(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return filterNamespacesForUIDs(resources, uids...)
+}
+
+// GetListNamespacesByName requests a list of namespace resources present in
+// the cluster.
+//
+// The returned list is filtered using names so that it contains only those
+// namespace resources which have a matching name. If no names are given then
+// all namespaces are returned.
+func (c *client) GetListNamespacesByName(ctx context.Context, names ...string) ([]*model.Namespace, error) {
+	resources, err := c.ListNamespaces(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	return filterNamespacesForNames(resources, names...)
+}
+
+// filterNamespacesForNames will return a subset of namespaces containing
+// resources which have one of the provided names. If names is not provided,
+// namespaces is returned as is.
+//
+// If there is no resource for a given name then an error is returned, thus
+// this is a strict helper.
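As an aside before the strict filter helpers that follow, here is a minimal usage sketch. It assumes the ClientWithReauth wrapper returned by GetAPIClient forwards the helper methods defined in this file (the wrapper type is not shown in this diff), and the parent import path is inferred from the other imports.

```go
package example

import (
	"context"
	"errors"
	"fmt"

	apiclient "github.com/storageos/kubectl-storageos/api_client"
	"github.com/storageos/kubectl-storageos/api_client/openapi"
	"k8s.io/client-go/rest"
)

// printNamespaceIDs resolves namespace names to IDs. Because the name filter
// is strict, a single missing namespace fails the whole lookup with a typed
// NamespaceNotFoundError instead of being silently dropped.
func printNamespaceIDs(ctx context.Context, restConfig *rest.Config, names ...string) error {
	c, err := apiclient.GetAPIClient(restConfig)
	if err != nil {
		return err
	}

	namespaces, err := c.GetListNamespacesByName(ctx, names...)
	if err != nil {
		var notFound openapi.NamespaceNotFoundError
		if errors.As(err, &notFound) {
			return fmt.Errorf("requested namespace does not exist: %w", err)
		}
		return err
	}

	for _, ns := range namespaces {
		fmt.Printf("%s\t%s\n", ns.Name, ns.ID)
	}
	return nil
}
```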
+func filterNamespacesForNames(namespaces []*model.Namespace, names ...string) ([]*model.Namespace, error) { + if len(names) == 0 { + return namespaces, nil + } + + retrieved := map[string]*model.Namespace{} + + for _, ns := range namespaces { + retrieved[ns.Name] = ns + } + + filtered := make([]*model.Namespace, 0, len(names)) + for _, name := range names { + ns, ok := retrieved[name] + if !ok { + return nil, openapi.NewNamespaceNameNotFoundError(name) + } + filtered = append(filtered, ns) + } + + return filtered, nil +} + +// filterNamespacesForUIDS will return a subset of namespaces containing +// resources which have one of the provided uids. If uids is not provided, +// namespaces is returned as is. +// +// If there is no resource for a given uid then an error is returned, thus +// this is a strict helper. +func filterNamespacesForUIDs(namespaces []*model.Namespace, uids ...string) ([]*model.Namespace, error) { + if len(uids) == 0 { + return namespaces, nil + } + + retrieved := map[string]*model.Namespace{} + + for _, ns := range namespaces { + retrieved[ns.ID] = ns + } + + filtered := make([]*model.Namespace, 0, len(uids)) + + for _, idVar := range uids { + ns, ok := retrieved[idVar] + if !ok { + return nil, openapi.NewNamespaceNotFoundError(idVar) + } + filtered = append(filtered, ns) + } + + return filtered, nil +} diff --git a/api_client/node.go b/api_client/node.go new file mode 100644 index 00000000..293755a0 --- /dev/null +++ b/api_client/node.go @@ -0,0 +1,123 @@ +package apiclient + +import ( + "context" + + "github.com/storageos/kubectl-storageos/api_client/openapi" + "github.com/storageos/kubectl-storageos/model" +) + +// GetNodeByName requests basic information for the node resource which has +// name. +// +// The resource model for the API is build around using unique identifiers, +// so this operation is inherently more expensive than the corresponding +// GetNode() operation. +// +// Retrieving a node resource by name involves requesting a list of all nodes +// in the cluster from the StorageOS API and returning the first node where the +// name matches. +func (c *client) GetNodeByName(ctx context.Context, name string) (*model.Node, error) { + nodes, err := c.ListNodes(ctx) + if err != nil { + return nil, err + } + + for _, n := range nodes { + if n.Name == name { + return n, nil + } + } + + return nil, openapi.NewNodeNameNotFoundError(name) +} + +// GetListNodesByUID requests a list containing basic information on each +// node resource in the cluster. +// +// The returned list is filtered using uids so that it contains only those +// resources which have a matching ID. Omitting uids will skip the filtering. +func (c *client) GetListNodesByUID(ctx context.Context, uids ...string) ([]*model.Node, error) { + nodes, err := c.ListNodes(ctx) + if err != nil { + return nil, err + } + + return filterNodesForUIDs(nodes, uids...) +} + +// GetListNodesByName requests a list containing basic information on each +// node resource in the cluster. +// +// The returned list is filtered using names so that it contains only those +// resources which have a matching name. Omitting names will skip the filtering. +func (c *client) GetListNodesByName(ctx context.Context, names ...string) ([]*model.Node, error) { + nodes, err := c.ListNodes(ctx) + if err != nil { + return nil, err + } + + return filterNodesForNames(nodes, names...) +} + +// filterNodesForNames will return a subset of nodes containing resources +// which have one of the provided names. 
If names is not provided, nodes is +// returned as is. +// +// If there is no resource for a given name then an error is returned, thus +// this is a strict helper. +func filterNodesForNames(nodes []*model.Node, names ...string) ([]*model.Node, error) { + // return everything if no filter names given + if len(names) == 0 { + return nodes, nil + } + + retrieved := map[string]*model.Node{} + + for _, n := range nodes { + retrieved[n.Name] = n + } + + filtered := make([]*model.Node, 0, len(names)) + + for _, name := range names { + n, ok := retrieved[name] + if !ok { + return nil, openapi.NewNodeNameNotFoundError(name) + } + filtered = append(filtered, n) + } + + return filtered, nil +} + +// filterNodesForUIDs will return a subset of nodes containing resources +// which have one of the provided uids. If uids is not provided, nodes is +// returned as is. +// +// If there is no resource for a given uid then an error is returned, thus +// this is a strict helper. +func filterNodesForUIDs(nodes []*model.Node, uids ...string) ([]*model.Node, error) { + // return everything if no filter uids given + if len(uids) == 0 { + return nodes, nil + } + + retrieved := map[string]*model.Node{} + + for _, n := range nodes { + retrieved[n.ID] = n + } + + filtered := make([]*model.Node, 0, len(uids)) + + for _, idVar := range uids { + n, ok := retrieved[idVar] + if !ok { + return nil, openapi.NewNodeNotFoundError(idVar) + } + filtered = append(filtered, n) + } + + return filtered, nil +} diff --git a/api_client/openapi/cluster.go b/api_client/openapi/cluster.go new file mode 100644 index 00000000..779e46b8 --- /dev/null +++ b/api_client/openapi/cluster.go @@ -0,0 +1,68 @@ +package openapi + +import ( + "context" + + "github.com/antihax/optional" + + openapi "github.com/storageos/go-api/autogenerated" + "github.com/storageos/kubectl-storageos/model" +) + +// UpdateClusterRequestParams contains optional request parameters for a update +// cluster operation. 
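Before the params type itself, a brief sketch (not part of the change) of the read-modify-write flow this CAS version enables, using only the GetCluster and UpdateCluster methods defined below in this file; with the version pinned, a concurrent update surfaces as a stale-write error instead of silently overwriting the other change.

```go
package example

import (
	"context"

	"github.com/storageos/kubectl-storageos/api_client/openapi"
	"github.com/storageos/kubectl-storageos/model"
)

// disableTelemetry fetches the current cluster configuration, flips one field
// and writes it back pinned to the version that was read.
func disableTelemetry(ctx context.Context, api *openapi.OpenAPI) (*model.Cluster, error) {
	current, err := api.GetCluster(ctx)
	if err != nil {
		return nil, err
	}

	current.DisableTelemetry = true

	return api.UpdateCluster(ctx, current, &openapi.UpdateClusterRequestParams{
		CASVersion: current.Version,
	})
}
```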
+type UpdateClusterRequestParams struct { + CASVersion string +} + +func (o *OpenAPI) GetCluster(ctx context.Context) (*model.Cluster, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + model, resp, err := o.client.DefaultApi.GetCluster(ctx) + if err != nil { + return nil, mapOpenAPIError(err, resp) + } + + return DecodeCluster(model) +} + +func (o *OpenAPI) UpdateCluster(ctx context.Context, resource *model.Cluster, params *UpdateClusterRequestParams) (*model.Cluster, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + level, err := EncodeLogLevel(resource.LogLevel) + if err != nil { + return nil, err + } + + format, err := EncodeLogFormat(resource.LogFormat) + if err != nil { + return nil, err + } + + updateData := openapi.UpdateClusterData{ + DisableTelemetry: resource.DisableTelemetry, + DisableCrashReporting: resource.DisableCrashReporting, + DisableVersionCheck: resource.DisableVersionCheck, + LogLevel: level, + LogFormat: format, + } + + opts := &openapi.UpdateClusterOpts{ + IgnoreVersion: optional.NewBool(true), + } + + // check optional params + if params != nil && params.CASVersion != "" { + updateData.Version = params.CASVersion + opts.IgnoreVersion = optional.NewBool(false) + } + + model, resp, err := o.client.DefaultApi.UpdateCluster(ctx, updateData, opts) + if err != nil { + return nil, mapOpenAPIError(err, resp) + } + + return DecodeCluster(model) +} diff --git a/api_client/openapi/codec.go b/api_client/openapi/codec.go new file mode 100644 index 00000000..a71c2511 --- /dev/null +++ b/api_client/openapi/codec.go @@ -0,0 +1,317 @@ +package openapi + +import ( + "errors" + + openapi "github.com/storageos/go-api/autogenerated" + "github.com/storageos/kubectl-storageos/model" + "github.com/storageos/kubectl-storageos/pkg/health" +) + +func DecodeCluster(cluster openapi.Cluster) (*model.Cluster, error) { + return &model.Cluster{ + ID: cluster.Id, + + DisableTelemetry: cluster.DisableTelemetry, + DisableCrashReporting: cluster.DisableCrashReporting, + DisableVersionCheck: cluster.DisableVersionCheck, + + LogLevel: model.LogLevelFromString(string(cluster.LogLevel)), + LogFormat: model.LogFormatFromString(string(cluster.LogFormat)), + + CreatedAt: cluster.CreatedAt, + UpdatedAt: cluster.UpdatedAt, + Version: cluster.Version, + }, nil +} + +func DecodeLicence(license openapi.Licence) (*model.License, error) { + features := make([]string, 0) + if license.Features != nil { + features = append(features, *license.Features...) 
+ } + + return &model.License{ + ClusterID: license.ClusterID, + ExpiresAt: license.ExpiresAt, + ClusterCapacityBytes: license.ClusterCapacityBytes, + UsedBytes: license.UsedBytes, + Kind: license.Kind, + CustomerName: license.CustomerName, + Features: features, + Version: license.Version, + }, nil +} + +func DecodeCapacityStats(stats openapi.CapacityStats) model.Stats { + return model.Stats{ + Total: stats.Total, + Free: stats.Free, + } +} + +func DecodeNode(node openapi.Node) (*model.Node, error) { + return &model.Node{ + ID: string(node.Id), + Name: node.Name, + Health: health.NodeFromString(string(node.Health)), + Capacity: DecodeCapacityStats(node.Capacity), + + Labels: node.Labels, + + IOAddr: node.IoEndpoint, + SupervisorAddr: node.SupervisorEndpoint, + GossipAddr: node.GossipEndpoint, + ClusteringAddr: node.ClusteringEndpoint, + + Cordoned: node.Cordoned, + CordonedAt: node.CordonedAt, + + CreatedAt: node.CreatedAt, + UpdatedAt: node.UpdatedAt, + Version: node.Version, + }, nil +} + +func DecodeVolume(vol openapi.Volume) (*model.Volume, error) { + v := &model.Volume{ + ID: string(vol.Id), + Name: vol.Name, + Description: vol.Description, + SizeBytes: vol.SizeBytes, + AttachedOn: string(vol.AttachedOn), + AttachmentType: model.AttachTypeFromString(string(vol.AttachmentType)), + Nfs: DecodeNFSConfig(vol.Nfs), + + Namespace: string(vol.NamespaceID), + Labels: vol.Labels, + TopologyLabels: model.TopologyLabels(vol.TopologyLabels), + Filesystem: model.FsTypeFromString(string(vol.FsType)), + + CreatedAt: vol.CreatedAt, + UpdatedAt: vol.UpdatedAt, + Version: vol.Version, + } + + m := vol.Master + v.Master = &model.Deployment{ + ID: m.Id, + Node: string(m.NodeID), + Health: health.MasterFromString(string(m.Health)), + Promotable: m.Promotable, + } + + replicas := []*model.Deployment{} + + if vol.Replicas != nil { + replicas = make([]*model.Deployment, len(*vol.Replicas)) + for i, r := range *vol.Replicas { + replicas[i] = &model.Deployment{ + ID: r.Id, + Node: string(r.NodeID), + Health: health.ReplicaFromString(string(r.Health)), + Promotable: r.Promotable, + } + + p := r.SyncProgress + + if (p != openapi.SyncProgress{}) { + replicas[i].SyncProgress = &model.SyncProgress{ + BytesRemaining: p.BytesRemaining, + ThroughputBytes: p.ThroughputBytes, + EstimatedSecondsRemaining: p.EstimatedSecondsRemaining, + } + } + } + } + + v.Replicas = replicas + + return v, nil +} + +func DecodeNamespace(ns openapi.Namespace) (*model.Namespace, error) { + return &model.Namespace{ + ID: string(ns.Id), + Name: ns.Name, + Labels: ns.Labels, + + CreatedAt: ns.CreatedAt, + UpdatedAt: ns.UpdatedAt, + Version: ns.Version, + }, nil +} + +func DecodePolicyGroup(policyGroup openapi.PolicyGroup) (*model.PolicyGroup, error) { + users := []*model.PolicyGroupMember{} + if policyGroup.Users != nil { + users = make([]*model.PolicyGroupMember, 0, len(policyGroup.Users)) + for _, u := range policyGroup.Users { + users = append(users, &model.PolicyGroupMember{ + ID: string(u.Id), + Username: u.Username, + }) + } + } + + specs := []*model.PolicyGroupSpec{} + if policyGroup.Specs != nil { + specs = make([]*model.PolicyGroupSpec, 0, len(*policyGroup.Specs)) + for _, spec := range *policyGroup.Specs { + specs = append(specs, &model.PolicyGroupSpec{ + NamespaceID: string(spec.NamespaceID), + ResourceType: spec.ResourceType, + ReadOnly: spec.ReadOnly, + }) + } + } + + return &model.PolicyGroup{ + ID: string(policyGroup.Id), + Name: policyGroup.Name, + Users: users, + Specs: specs, + CreatedAt: policyGroup.CreatedAt, + UpdatedAt: 
policyGroup.UpdatedAt, + Version: policyGroup.Version, + }, nil +} + +func DecodeUser(user openapi.User) (*model.User, error) { + + groups := []string{} + + if user.Groups != nil { + groups = make([]string, len(*user.Groups)) + for i, groupID := range *user.Groups { + groups[i] = string(groupID) + } + } + + return &model.User{ + ID: string(user.Id), + Username: user.Username, + + IsAdmin: user.IsAdmin, + Groups: groups, + + CreatedAt: user.CreatedAt, + UpdatedAt: user.UpdatedAt, + Version: user.Version, + }, nil +} + +func DecodeNFSConfig(nfsConfig openapi.NfsConfig) model.NFSConfig { + cfg := model.NFSConfig{ + Exports: make([]model.NFSExportConfig, 0), + ServiceEndpoint: "", + } + + if nfsConfig.ServiceEndpoint != nil { + cfg.ServiceEndpoint = *nfsConfig.ServiceEndpoint + } + + if nfsConfig.Exports != nil { + for _, e := range *nfsConfig.Exports { + cfg.Exports = append(cfg.Exports, DecodeNFSExportConfig(e)) + } + } + + return cfg +} + +func DecodeNFSExportConfig(nfsExportConfig openapi.NfsExportConfig) model.NFSExportConfig { + cfg := model.NFSExportConfig{ + ExportID: uint(nfsExportConfig.ExportID), + Path: nfsExportConfig.Path, + PseudoPath: nfsExportConfig.PseudoPath, + ACLs: make([]model.NFSExportConfigACL, 0, len(nfsExportConfig.Acls)), + } + + for _, a := range nfsExportConfig.Acls { + cfg.ACLs = append(cfg.ACLs, model.NFSExportConfigACL{ + Identity: model.NFSExportConfigACLIdentity{ + IdentityType: a.Identity.IdentityType, + Matcher: a.Identity.Matcher, + }, + SquashConfig: model.NFSExportConfigACLSquashConfig{ + GID: a.SquashConfig.Gid, + UID: a.SquashConfig.Uid, + Squash: a.SquashConfig.Squash, + }, + AccessLevel: a.AccessLevel, + }) + } + return cfg +} + +func EncodeNFSExport(export model.NFSExportConfig) openapi.NfsExportConfig { + cfg := openapi.NfsExportConfig{ + ExportID: uint64(export.ExportID), + Path: export.Path, + PseudoPath: export.PseudoPath, + Acls: []openapi.NfsAcl{}, + } + + for _, a := range export.ACLs { + cfg.Acls = append(cfg.Acls, openapi.NfsAcl{ + Identity: openapi.NfsAclIdentity{ + IdentityType: a.Identity.IdentityType, + Matcher: a.Identity.Matcher, + }, + SquashConfig: openapi.NfsAclSquashConfig{ + Uid: a.SquashConfig.UID, + Gid: a.SquashConfig.GID, + Squash: a.SquashConfig.Squash, + }, + AccessLevel: a.AccessLevel, + }) + } + + return cfg +} + +func EncodeFsType(filesystem model.FsType) (openapi.FsType, error) { + v := openapi.FsType(filesystem.String()) + switch v { + case openapi.FSTYPE_EXT2, openapi.FSTYPE_EXT3, + openapi.FSTYPE_EXT4, openapi.FSTYPE_XFS, + openapi.FSTYPE_BTRFS, openapi.FSTYPE_BLOCK: + return v, nil + default: + return "", NewEncodingError( + errors.New("unknown fs type"), + v, + filesystem, + ) + } +} + +func EncodeLogLevel(level model.LogLevel) (openapi.LogLevel, error) { + v := openapi.LogLevel(level.String()) + switch v { + case openapi.LOGLEVEL_DEBUG, openapi.LOGLEVEL_INFO, + openapi.LOGLEVEL_WARN, openapi.LOGLEVEL_ERROR: + return v, nil + default: + return "", NewEncodingError( + errors.New("unknown log level"), + v, + level, + ) + } +} + +func EncodeLogFormat(format model.LogFormat) (openapi.LogFormat, error) { + v := openapi.LogFormat(format.String()) + switch v { + case openapi.LOGFORMAT_DEFAULT, openapi.LOGFORMAT_JSON: + return v, nil + default: + return "", NewEncodingError( + errors.New("unknown log format"), + v, + format, + ) + } +} diff --git a/api_client/openapi/codec_test.go b/api_client/openapi/codec_test.go new file mode 100644 index 00000000..fe17696e --- /dev/null +++ b/api_client/openapi/codec_test.go @@ -0,0 
+1,775 @@ +package openapi + +import ( + "reflect" + "testing" + "time" + + "github.com/kr/pretty" + + openapi "github.com/storageos/go-api/autogenerated" + "github.com/storageos/kubectl-storageos/model" + "github.com/storageos/kubectl-storageos/pkg/health" +) + +func TestDecodeLicence(t *testing.T) { + t.Parallel() + + mockExpiryTime := time.Date(2020, 01, 01, 0, 0, 0, 2, time.UTC) + + tests := []struct { + name string + + model openapi.Licence + + wantResource *model.License + wantErr error + }{ + { + name: "ok", + + model: openapi.Licence{ + ClusterID: "bananas", + ExpiresAt: mockExpiryTime, + ClusterCapacityBytes: 42, + UsedBytes: 42 / 2, + Kind: "mockLicence", + CustomerName: "go testing framework", + Features: &[]string{"nfs"}, + Version: "bananaVersion", + }, + + wantResource: &model.License{ + ClusterID: "bananas", + ExpiresAt: mockExpiryTime, + ClusterCapacityBytes: 42, + UsedBytes: 42 / 2, + Kind: "mockLicence", + CustomerName: "go testing framework", + Features: []string{"nfs"}, + Version: string("bananaVersion"), + }, + wantErr: nil, + }, + { + name: "nil features", + + model: openapi.Licence{ + ClusterID: "bananas", + ExpiresAt: mockExpiryTime, + ClusterCapacityBytes: 42, + UsedBytes: 42 / 2, + Kind: "mockLicence", + CustomerName: "go testing framework", + Features: nil, + Version: "bananaVersion", + }, + + wantResource: &model.License{ + ClusterID: "bananas", + ExpiresAt: mockExpiryTime, + ClusterCapacityBytes: 42, + UsedBytes: 42 / 2, + Kind: "mockLicence", + CustomerName: "go testing framework", + Features: []string{}, + Version: string("bananaVersion"), + }, + wantErr: nil, + }, + } + + for _, tt := range tests { + var tt = tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + gotResource, gotErr := DecodeLicence(tt.model) + + if !reflect.DeepEqual(gotErr, tt.wantErr) { + t.Errorf("got error %v, want %v", gotErr, tt.wantErr) + } + + if !reflect.DeepEqual(gotResource, tt.wantResource) { + pretty.Ldiff(t, gotResource, tt.wantResource) + t.Errorf("got decoded cluster config %v, want %v", pretty.Sprint(gotResource), pretty.Sprint(tt.wantResource)) + } + }) + } +} + +func TestDecodeCluster(t *testing.T) { + t.Parallel() + + mockCreatedAtTime := time.Date(2020, 01, 01, 0, 0, 0, 0, time.UTC) + mockUpdatedAtTime := time.Date(2020, 01, 01, 0, 0, 0, 1, time.UTC) + + tests := []struct { + name string + + model openapi.Cluster + + wantResource *model.Cluster + wantErr error + }{ + { + name: "ok", + + model: openapi.Cluster{ + Id: "bananas", + + DisableTelemetry: true, + DisableCrashReporting: true, + DisableVersionCheck: true, + LogLevel: openapi.LOGLEVEL_DEBUG, + LogFormat: openapi.LOGFORMAT_JSON, + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + + wantResource: &model.Cluster{ + ID: "bananas", + + DisableTelemetry: true, + DisableCrashReporting: true, + DisableVersionCheck: true, + + LogLevel: model.LogLevelFromString("debug"), + LogFormat: model.LogFormatFromString("json"), + + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + wantErr: nil, + }, + { + name: "does not panic with no fields", + model: openapi.Cluster{}, + wantResource: &model.Cluster{}, + wantErr: nil, + }, + } + + for _, tt := range tests { + var tt = tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + gotResource, gotErr := DecodeCluster(tt.model) + + if !reflect.DeepEqual(gotErr, tt.wantErr) { + t.Errorf("got error %v, want %v", gotErr, tt.wantErr) + } + + if !reflect.DeepEqual(gotResource, tt.wantResource) { + 
pretty.Ldiff(t, gotResource, tt.wantResource) + t.Errorf("got decoded cluster config %v, want %v", pretty.Sprint(gotResource), pretty.Sprint(tt.wantResource)) + } + }) + } +} + +func TestDecodeNode(t *testing.T) { + t.Parallel() + + mockCreatedAtTime := time.Date(2020, 01, 01, 0, 0, 0, 0, time.UTC) + mockUpdatedAtTime := time.Date(2020, 01, 01, 0, 0, 0, 1, time.UTC) + + tests := []struct { + name string + + model openapi.Node + + wantResource *model.Node + wantErr error + }{ + { + name: "ok", + + model: openapi.Node{ + Id: "banananodeid", + Name: "banananodename", + Health: openapi.NODEHEALTH_ONLINE, + IoEndpoint: "arbitraryIOEndpoint", + SupervisorEndpoint: "arbitrarySupervisorEndpoint", + GossipEndpoint: "arbitraryGossipEndpoint", + ClusteringEndpoint: "arbitraryClusteringEndpoint", + Labels: map[string]string{ + "storageos.com/label": "value", + }, + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + + wantResource: &model.Node{ + ID: "banananodeid", + Name: "banananodename", + Health: health.NodeOnline, + + Labels: map[string]string{ + "storageos.com/label": "value", + }, + + IOAddr: "arbitraryIOEndpoint", + SupervisorAddr: "arbitrarySupervisorEndpoint", + GossipAddr: "arbitraryGossipEndpoint", + ClusteringAddr: "arbitraryClusteringEndpoint", + + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + wantErr: nil, + }, + } + + for _, tt := range tests { + var tt = tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + gotResource, gotErr := DecodeNode(tt.model) + + if !reflect.DeepEqual(gotErr, tt.wantErr) { + t.Errorf("got error %v, want %v", gotErr, tt.wantErr) + } + + if !reflect.DeepEqual(gotResource, tt.wantResource) { + pretty.Ldiff(t, gotResource, tt.wantResource) + t.Errorf("got decoded node config %v, want %v", pretty.Sprint(gotResource), pretty.Sprint(tt.wantResource)) + } + }) + } +} + +func TestDecodeVolume(t *testing.T) { + t.Parallel() + + mockCreatedAtTime := time.Date(2020, 01, 01, 0, 0, 0, 0, time.UTC) + mockUpdatedAtTime := time.Date(2020, 01, 01, 0, 0, 0, 1, time.UTC) + + tests := []struct { + name string + + model openapi.Volume + + wantResource *model.Volume + wantErr error + }{ + { + name: "ok with replicas", + + model: openapi.Volume{ + Id: "my-volume-id", + Name: "my-volume", + Description: "some arbitrary description", + AttachedOn: "some-arbitrary-node-id", + Nfs: openapi.NfsConfig{ + Exports: &[]openapi.NfsExportConfig{ + { + ExportID: 1, + Path: "/", + PseudoPath: "/", + Acls: []openapi.NfsAcl{ + { + Identity: openapi.NfsAclIdentity{ + IdentityType: "cidr", + Matcher: "10.0.0.0/8", + }, + SquashConfig: openapi.NfsAclSquashConfig{ + Gid: 0, + Uid: 0, + Squash: "root", + }, + AccessLevel: "rw", + }, + }, + }, + }, + ServiceEndpoint: func(s string) *string { return &s }("10.0.0.1:/"), + }, + NamespaceID: "some-arbitrary-namespace-id", + Labels: map[string]string{ + "storageos.com/label": "value", + }, + FsType: openapi.FSTYPE_EXT4, + Master: openapi.MasterDeploymentInfo{ + Id: "master-id", + NodeID: "some-arbitrary-node-id", + Health: openapi.MASTERHEALTH_ONLINE, + Promotable: true, + }, + Replicas: &[]openapi.ReplicaDeploymentInfo{ + { + Id: "replica-a-id", + NodeID: "some-second-node-id", + Health: openapi.REPLICAHEALTH_SYNCING, + Promotable: false, + }, + { + Id: "replica-b-id", + NodeID: "some-third-node-id", + Health: openapi.REPLICAHEALTH_READY, + Promotable: true, + }, + }, + SizeBytes: 1337, + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + 
+ wantResource: &model.Volume{ + ID: "my-volume-id", + Name: "my-volume", + Description: "some arbitrary description", + SizeBytes: 1337, + + AttachedOn: "some-arbitrary-node-id", + Nfs: model.NFSConfig{ + Exports: []model.NFSExportConfig{ + { + ExportID: 1, + Path: "/", + PseudoPath: "/", + ACLs: []model.NFSExportConfigACL{ + { + Identity: model.NFSExportConfigACLIdentity{ + IdentityType: "cidr", + Matcher: "10.0.0.0/8", + }, + SquashConfig: model.NFSExportConfigACLSquashConfig{ + GID: 0, + UID: 0, + Squash: "root", + }, + AccessLevel: "rw", + }, + }, + }, + }, + ServiceEndpoint: "10.0.0.1:/", + }, + Namespace: "some-arbitrary-namespace-id", + Labels: map[string]string{ + "storageos.com/label": "value", + }, + Filesystem: model.FsTypeFromString("ext4"), + + Master: &model.Deployment{ + ID: "master-id", + Node: "some-arbitrary-node-id", + Health: health.MasterOnline, + Promotable: true, + }, + Replicas: []*model.Deployment{ + { + ID: "replica-a-id", + Node: "some-second-node-id", + Health: health.ReplicaSyncing, + Promotable: false, + }, + { + ID: "replica-b-id", + Node: "some-third-node-id", + Health: health.ReplicaReady, + Promotable: true, + }, + }, + + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + wantErr: nil, + }, + { + name: "ok no replicas", + + model: openapi.Volume{ + Id: "my-volume-id", + Name: "my-volume", + Description: "some arbitrary description", + AttachedOn: "some-arbitrary-node-id", + Nfs: openapi.NfsConfig{ + Exports: &[]openapi.NfsExportConfig{ + { + ExportID: 1, + Path: "/", + PseudoPath: "/", + Acls: []openapi.NfsAcl{ + { + Identity: openapi.NfsAclIdentity{ + IdentityType: "cidr", + Matcher: "10.0.0.0/8", + }, + SquashConfig: openapi.NfsAclSquashConfig{ + Gid: 0, + Uid: 0, + Squash: "root", + }, + AccessLevel: "rw", + }, + }, + }, + }, + ServiceEndpoint: func(s string) *string { return &s }("10.0.0.1:/"), + }, + NamespaceID: "some-arbitrary-namespace-id", + Labels: map[string]string{ + "storageos.com/label": "value", + }, + FsType: openapi.FSTYPE_EXT4, + Master: openapi.MasterDeploymentInfo{ + Id: "master-id", + NodeID: "some-arbitrary-node-id", + Health: openapi.MASTERHEALTH_ONLINE, + Promotable: true, + }, + SizeBytes: 1337, + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + + wantResource: &model.Volume{ + ID: "my-volume-id", + Name: "my-volume", + Description: "some arbitrary description", + SizeBytes: 1337, + + AttachedOn: "some-arbitrary-node-id", + Nfs: model.NFSConfig{ + Exports: []model.NFSExportConfig{ + { + ExportID: 1, + Path: "/", + PseudoPath: "/", + ACLs: []model.NFSExportConfigACL{ + { + Identity: model.NFSExportConfigACLIdentity{ + IdentityType: "cidr", + Matcher: "10.0.0.0/8", + }, + SquashConfig: model.NFSExportConfigACLSquashConfig{ + GID: 0, + UID: 0, + Squash: "root", + }, + AccessLevel: "rw", + }, + }, + }, + }, + ServiceEndpoint: "10.0.0.1:/", + }, + Namespace: "some-arbitrary-namespace-id", + Labels: map[string]string{ + "storageos.com/label": "value", + }, + Filesystem: model.FsTypeFromString("ext4"), + + Master: &model.Deployment{ + ID: "master-id", + Node: "some-arbitrary-node-id", + Health: health.MasterOnline, + Promotable: true, + }, + Replicas: []*model.Deployment{}, + + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + wantErr: nil, + }, + } + + for _, tt := range tests { + var tt = tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + gotResource, gotErr := DecodeVolume(tt.model) + + if 
!reflect.DeepEqual(gotErr, tt.wantErr) { + t.Errorf("got error %v, want %v", gotErr, tt.wantErr) + } + + if !reflect.DeepEqual(gotResource, tt.wantResource) { + pretty.Ldiff(t, gotResource, tt.wantResource) + t.Errorf("got decoded volume config %v, want %v", pretty.Sprint(gotResource), pretty.Sprint(tt.wantResource)) + } + }) + } +} + +func TestDecodeNamespace(t *testing.T) { + t.Parallel() + + mockCreatedAtTime := time.Date(2020, 01, 01, 0, 0, 0, 0, time.UTC) + mockUpdatedAtTime := time.Date(2020, 01, 01, 0, 0, 0, 1, time.UTC) + + tests := []struct { + name string + + model openapi.Namespace + + wantResource *model.Namespace + wantErr error + }{ + { + name: "ok", + + model: openapi.Namespace{ + Id: "my-namespace-id", + Name: "my-namespace", + Labels: map[string]string{ + "storageos.com/label": "value", + }, + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + + wantResource: &model.Namespace{ + ID: "my-namespace-id", + Name: "my-namespace", + Labels: map[string]string{ + "storageos.com/label": "value", + }, + + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + wantErr: nil, + }, + } + + for _, tt := range tests { + var tt = tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + gotResource, gotErr := DecodeNamespace(tt.model) + + if !reflect.DeepEqual(gotErr, tt.wantErr) { + t.Errorf("got error %v, want %v", gotErr, tt.wantErr) + } + + if !reflect.DeepEqual(gotResource, tt.wantResource) { + pretty.Ldiff(t, gotResource, tt.wantResource) + t.Errorf("got decoded namespace config %v, want %v", pretty.Sprint(gotResource), pretty.Sprint(tt.wantResource)) + } + }) + } +} + +func TestDecodePolicyGroup(t *testing.T) { + t.Parallel() + + mockCreatedAtTime := time.Date(2020, 01, 01, 0, 0, 0, 0, time.UTC) + mockUpdatedAtTime := time.Date(2020, 01, 01, 0, 0, 0, 1, time.UTC) + + tests := []struct { + name string + + model openapi.PolicyGroup + + wantResource *model.PolicyGroup + wantErr error + }{ + { + name: "ok with users and specs", + + model: openapi.PolicyGroup{ + Id: "policy-group-id", + Name: "policy-group-name", + Users: []openapi.PolicyGroupUsers{ + { + Id: "user-id", + Username: "username", + }, + { + Id: "user-id-2", + Username: "username-2", + }, + }, + Specs: &[]openapi.PoliciesIdSpecs{ + { + NamespaceID: "namespace-id", + ResourceType: "resource-type", + ReadOnly: true, + }, + }, + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "version", + }, + + wantResource: &model.PolicyGroup{ + ID: "policy-group-id", + Name: "policy-group-name", + Users: []*model.PolicyGroupMember{ + { + ID: "user-id", + Username: "username", + }, + { + ID: "user-id-2", + Username: "username-2", + }, + }, + Specs: []*model.PolicyGroupSpec{ + { + NamespaceID: "namespace-id", + ResourceType: "resource-type", + ReadOnly: true, + }, + }, + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "version", + }, + wantErr: nil, + }, + { + name: "ok with no users or specs", + + model: openapi.PolicyGroup{ + Id: "policy-group-id", + Name: "policy-group-name", + Users: nil, + Specs: nil, + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "version", + }, + + wantResource: &model.PolicyGroup{ + ID: "policy-group-id", + Name: "policy-group-name", + Users: []*model.PolicyGroupMember{}, + Specs: []*model.PolicyGroupSpec{}, + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "version", + }, + wantErr: nil, + }, + } + + for _, tt := range tests { + var tt = tt + 
t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + gotResource, gotErr := DecodePolicyGroup(tt.model) + if !reflect.DeepEqual(gotErr, tt.wantErr) { + t.Errorf("got error %v, want %v", gotErr, tt.wantErr) + } + + if !reflect.DeepEqual(gotResource, tt.wantResource) { + pretty.Ldiff(t, gotResource, tt.wantResource) + t.Errorf("got decoded policy group %v, want %v", pretty.Sprint(gotResource), pretty.Sprint(tt.wantResource)) + } + }) + } +} + +func TestDecodeUser(t *testing.T) { + t.Parallel() + + mockCreatedAtTime := time.Date(2020, 01, 01, 0, 0, 0, 0, time.UTC) + mockUpdatedAtTime := time.Date(2020, 01, 01, 0, 0, 0, 1, time.UTC) + + tests := []struct { + name string + + model openapi.User + + wantResource *model.User + wantErr error + }{ + { + name: "ok with groups", + + model: openapi.User{ + Id: "my-user-id", + Username: "my-username", + IsAdmin: true, + Groups: &[]string{ + "group-a-id", + "group-b-id", + }, + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + + wantResource: &model.User{ + ID: "my-user-id", + Username: "my-username", + + IsAdmin: true, + Groups: []string{ + "group-a-id", + "group-b-id", + }, + + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + wantErr: nil, + }, + { + name: "ok no groups", + + model: openapi.User{ + Id: "my-user-id", + Username: "my-username", + IsAdmin: true, + Groups: nil, + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + + wantResource: &model.User{ + ID: "my-user-id", + Username: "my-username", + + IsAdmin: true, + Groups: []string{}, + + CreatedAt: mockCreatedAtTime, + UpdatedAt: mockUpdatedAtTime, + Version: "NDIK", + }, + wantErr: nil, + }, + } + + for _, tt := range tests { + var tt = tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + gotResource, gotErr := DecodeUser(tt.model) + + if !reflect.DeepEqual(gotErr, tt.wantErr) { + t.Errorf("got error %v, want %v", gotErr, tt.wantErr) + } + + if !reflect.DeepEqual(gotResource, tt.wantResource) { + pretty.Ldiff(t, gotResource, tt.wantResource) + t.Errorf("got decoded user config %v, want %v", pretty.Sprint(gotResource), pretty.Sprint(tt.wantResource)) + } + }) + } +} diff --git a/api_client/openapi/diagnostics.go b/api_client/openapi/diagnostics.go new file mode 100644 index 00000000..63b0a2aa --- /dev/null +++ b/api_client/openapi/diagnostics.go @@ -0,0 +1,178 @@ +package openapi + +import ( + "context" + "encoding/json" + "errors" + "io" + "net/http" + "strings" + + openapi "github.com/storageos/go-api/autogenerated" +) + +var ( + // errExtractingFilename is an error indicating a filename was not extracted + // from the response header. + errExtractingFilename = errors.New("failed to extract filename from response header") +) + +// GetDiagnostics makes a request to the StorageOS API for a cluster diagnostic +// bundle to be generated and returned to the client. +// +// Because the OpenAPI code generator produces broken code for this method, +// we source the target path, authorization token and http client from it but +// handle the response ourselves. 
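The function itself follows; for context, a sketch of how a caller might consume the returned stream. The fallback-name handling is an assumption on the caller's side, since the server may not always supply a Content-Disposition filename.

```go
package example

import (
	"context"
	"io"
	"os"

	"github.com/storageos/kubectl-storageos/api_client/openapi"
)

// saveDiagnostics streams a cluster diagnostic bundle to a local file and
// returns the file name that was used.
func saveDiagnostics(ctx context.Context, api *openapi.OpenAPI, fallbackName string) (string, error) {
	bundle, name, err := api.GetDiagnostics(ctx)
	if err != nil {
		return "", err
	}
	defer bundle.Close()

	// The name is extracted from the Content-Disposition header and may be
	// empty, so fall back to a caller-provided default.
	if name == "" {
		name = fallbackName
	}

	out, err := os.Create(name)
	if err != nil {
		return "", err
	}
	defer out.Close()

	if _, err := io.Copy(out, bundle); err != nil {
		return "", err
	}
	return name, nil
}
```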
+func (o *OpenAPI) GetDiagnostics(ctx context.Context) (io.ReadCloser, string, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + // Get the appropriate config settings from the openapi client + token := o.client.GetConfig().DefaultHeader["Authorization"] + targetEndpoint := o.client.GetConfig().Scheme + "://" + o.client.GetConfig().Host + "/" + o.client.GetConfig().BasePath + "/diagnostics" + client := o.client.GetConfig().HTTPClient + + // Construct the request + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetEndpoint, nil) + if err != nil { + return nil, "", err + } + + req.Header["Authorization"] = []string{token} + req.Header["Accept"] = []string{"application/octet-stream", "application/gzip", "application/json"} + + resp, err := client.Do(req) + if err != nil { + return nil, "", err + } + + bundleReadCloser, bundleName, err := o.getFileFromResp(resp) + if err != nil { + return nil, "", err + } + + return bundleReadCloser, bundleName, nil +} + +// GetSingleNodeDiagnostics makes a request to the StorageOS API for a single +// node cluster diagnostic bundle to be generated and returned to the client. +// +// Because the OpenAPI code generator produces broken code for this method, we +// source the target path, authorization token and http client from it but +// handle the response ourselves. +func (o *OpenAPI) GetSingleNodeDiagnostics(ctx context.Context, nodeID string) (io.ReadCloser, string, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + // Get the appropriate config settings from the openapi client + token := o.client.GetConfig().DefaultHeader["Authorization"] + targetEndpoint := o.client.GetConfig().Scheme + "://" + o.client.GetConfig().Host + "/" + o.client.GetConfig().BasePath + "/diagnostics" + "/" + nodeID + client := o.client.GetConfig().HTTPClient + + // Construct the request + req, err := http.NewRequestWithContext(ctx, http.MethodGet, targetEndpoint, nil) + if err != nil { + return nil, "", err + } + + req.Header["Authorization"] = []string{token} + req.Header["Accept"] = []string{"application/octet-stream", "application/gzip", "application/json"} + + resp, err := client.Do(req) + if err != nil { + return nil, "", err + } + + bundleReadCloser, bundleName, err := o.getFileFromResp(resp) + if err != nil { + return nil, "", err + } + + return bundleReadCloser, bundleName, nil +} + +func (o *OpenAPI) getFileFromResp(resp *http.Response) (io.ReadCloser, string, error) { + var name string + if extracted, err := getFilenameFromHeader(resp.Header); err == nil { + name = extracted + } + + switch resp.StatusCode { + case http.StatusOK: + // Carry on. + case http.StatusBadGateway: + // Check if the response content-type indicates a partial bundle. That + // is, it has a gzip or octet-stream content type. + for _, value := range resp.Header["Content-Type"] { + switch value { + case "application/gzip", "application/octet-stream": + return nil, "", NewIncompleteDiagnosticsError( + resp.Body, name, + ) + } + } + + // If not, use the normal error handling code. + fallthrough + default: + defer resp.Body.Close() + // Try to read the response body and unmarshal it into an openapi.Error + body, err := io.ReadAll(resp.Body) + if err != nil { + return nil, "", err + } + + var errModel openapi.Error + err = json.Unmarshal(body, &errModel) + if err != nil { + return nil, "", err + } + + // Construct an openAPIError from it and hand off to the + // OpenAPI error mapping code. 
+ return nil, "", mapOpenAPIError( + newOpenAPIError(errModel), + resp, + ) + } + + return resp.Body, name, nil +} + +// getFileNameFromHeader attempts to extract an attachment filename from the +// provided HTTP response header. +func getFilenameFromHeader(header http.Header) (string, error) { + + // Try grab a name from the content disposition header. + // + // Expected form if present is `attachment; filename="some-name.ext"`. + for _, value := range header["Content-Disposition"] { + parts := strings.Split(value, ";") + // If it doesn't split at least in two, can't have filename key and be + // correct + if len(parts) != 2 { + continue + } + + if parts[0] != "attachment" { + continue + } + + parts = strings.Split(parts[1], "=") + // If the second part doesn't split in two on an equals sign, can't be + // correct + if len(parts) != 2 { + continue + } + + if strings.Trim(parts[0], " ") != "filename" { + continue + } + + // Cut quotes and whitespace from head and tail, then break out + name := strings.Trim(parts[1], " \"") + return name, nil + } + + return "", errExtractingFilename +} diff --git a/api_client/openapi/diagnostics_test.go b/api_client/openapi/diagnostics_test.go new file mode 100644 index 00000000..bcf97fed --- /dev/null +++ b/api_client/openapi/diagnostics_test.go @@ -0,0 +1,90 @@ +package openapi + +import ( + "errors" + "net/http" + "testing" +) + +func TestGetFilenameFromHeader(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + + header http.Header + + wantName string + wantErr error + }{ + { + name: "ok, gets name from correct quoted format amid other values", + + header: http.Header{ + "Content-Disposition": []string{ + "bogus", + "attachment; filename=\"bundle.bin\"", + }, + }, + + wantName: "bundle.bin", + wantErr: nil, + }, + { + name: "ok, gets name from badly spaced format amid other values", + + header: http.Header{ + "Content-Disposition": []string{ + "a", + "attachment; filename= \" bundle.gz \" ", + "b", + }, + }, + + wantName: "bundle.gz", + wantErr: nil, + }, + { + name: "err, no attachment header value", + + header: http.Header{ + "Content-Disposition": []string{ + "a", + "b", + }, + }, + + wantName: "", + wantErr: errExtractingFilename, + }, + { + name: "err, attachment missing filename=x", + + header: http.Header{ + "Content-Disposition": []string{ + "a", + "attachment; filename", + "b", + }, + }, + + wantName: "", + wantErr: errExtractingFilename, + }, + } + + for _, tt := range tests { + var tt = tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + gotName, gotErr := getFilenameFromHeader(tt.header) + if !errors.Is(gotErr, tt.wantErr) { + t.Errorf("got error %v, want %v", gotErr, tt.wantErr) + } + + if gotName != tt.wantName { + t.Errorf("got name %v, want %v", gotName, tt.wantName) + } + }) + } +} diff --git a/api_client/openapi/errors.go b/api_client/openapi/errors.go new file mode 100644 index 00000000..d0166c93 --- /dev/null +++ b/api_client/openapi/errors.go @@ -0,0 +1,685 @@ +package openapi + +import ( + "fmt" + "io" + "net/http" + + openapi "github.com/storageos/go-api/autogenerated" +) + +type VolumeExistsError struct { + name string + namespaceID string +} + +func (e VolumeExistsError) Error() string { + return fmt.Sprintf("volume name %v is already in use for namespace with ID %v", e.name, e.namespaceID) +} + +func NewVolumeExistsError(name string, namespaceID string) VolumeExistsError { + return VolumeExistsError{ + name: name, + namespaceID: namespaceID, + } +} + +type InvalidVolumeCreationError struct { + details 
string +} + +func (e InvalidVolumeCreationError) Error() string { + msg := "volume creation request is invalid" + if e.details != "" { + msg = fmt.Sprintf("%v: %v", msg, e.details) + } + return msg +} + +func NewInvalidVolumeCreationError(details string) InvalidVolumeCreationError { + return InvalidVolumeCreationError{ + details: details, + } +} + +type VolumeNotFoundError struct { + msg string + + uid string + name string +} + +func (e VolumeNotFoundError) Error() string { + return e.msg +} + +func NewVolumeNotFoundError(details string) VolumeNotFoundError { + return VolumeNotFoundError{ + msg: details, + } +} + +func NewVolumeIDNotFoundError(volumeID string) VolumeNotFoundError { + return VolumeNotFoundError{ + msg: fmt.Sprintf("volume with ID %v not found for target namespace", volumeID), + uid: volumeID, + } +} + +// NewVolumeNameNotFoundError returns a VolumeNotFoundError for the volume +// with name, constructing a user friendly message and storing the name inside +// the error. +func NewVolumeNameNotFoundError(name string) VolumeNotFoundError { + return VolumeNotFoundError{ + msg: fmt.Sprintf("volume with name %v not found for target namespace", name), + name: name, + } +} + +// UserExistsError is returned when a user creation request is sent to the +// StorageOS API for an already taken username. +type UserExistsError struct { + username string +} + +// Error returns an error message indicating that a username is already in use. +func (e UserExistsError) Error() string { + return fmt.Sprintf("another user with username %v already exists", e.username) +} + +// NewUserExistsError returns an error indicating that a user already exists +// for username. +func NewUserExistsError(username string) UserExistsError { + return UserExistsError{ + username: username, + } +} + +// InvalidUserCreationError is returned when an user creation request sent to +// the StorageOS API is invalid. +type InvalidUserCreationError struct { + details string +} + +// Error returns an error message indicating that a user creation request +// made to the StorageOS API is invalid, including details if available. +func (e InvalidUserCreationError) Error() string { + msg := "user creation request is invalid" + if e.details != "" { + msg = fmt.Sprintf("%v: %v", msg, e.details) + } + return msg +} + +// NewInvalidUserCreationError returns an InvalidUserCreationError, using +// details to provide information about what must be corrected. +func NewInvalidUserCreationError(details string) InvalidUserCreationError { + return InvalidUserCreationError{ + details: details, + } +} + +type UserNotFoundError struct { + msg string + + uid string + name string +} + +func (e UserNotFoundError) Error() string { + return e.msg +} + +// NewUserNotFoundError returns a UserNotFoundError using details as the +// the error message. This can be used when provided an opaque but detailed +// error strings. +func NewUserNotFoundError(details string, uID string) UserNotFoundError { + return UserNotFoundError{ + msg: details, + uid: uID, + } +} + +// NewUserNameNotFoundError returns a UserNotFoundError for the user +// with name, constructing a user friendly message and storing the name inside +// the error. +func NewUserNameNotFoundError(name string) UserNotFoundError { + return UserNotFoundError{ + msg: fmt.Sprintf("user with name %v not found", name), + name: name, + } +} + +// PolicyGroupNotFoundError indicates that the API could not find the policy +// group specified. 
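These typed errors are meant to be consumed with errors.As rather than by matching message strings. A small illustrative sketch, using only error types defined earlier in this file:

```go
package example

import (
	"errors"
	"fmt"

	"github.com/storageos/kubectl-storageos/api_client/openapi"
)

// describeLookupError classifies a failed lookup by error type so callers can
// react differently to missing volumes and missing users.
func describeLookupError(err error) string {
	var volNotFound openapi.VolumeNotFoundError
	var userNotFound openapi.UserNotFoundError

	switch {
	case errors.As(err, &volNotFound):
		return fmt.Sprintf("volume lookup failed: %v", volNotFound)
	case errors.As(err, &userNotFound):
		return fmt.Sprintf("user lookup failed: %v", userNotFound)
	default:
		return fmt.Sprintf("unexpected error: %v", err)
	}
}
```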
+type PolicyGroupNotFoundError struct { + msg string + + gid string + name string +} + +// Error returns an error message indicating that the policy group with a given +// ID or name was not found, as configured. +func (e PolicyGroupNotFoundError) Error() string { + switch { + case e.gid != "": + return fmt.Sprintf("policy group with ID %v not found", e.gid) + case e.name != "": + return fmt.Sprintf("policy group with name %v not found", e.name) + } + + return e.msg +} + +// NewPolicyGroupIDNotFoundError returns a PolicyGroupNotFoundError for the +// policy group with gid, constructing a user friendly message and storing +// the ID inside the error. +func NewPolicyGroupIDNotFoundError(gid string) PolicyGroupNotFoundError { + return PolicyGroupNotFoundError{ + gid: gid, + } +} + +// NewPolicyGroupNameNotFoundError returns a PolicyGroupNotFoundError for the +// policy group with name, constructing a user friendly message and storing +// the name inside the error. +func NewPolicyGroupNameNotFoundError(name string) PolicyGroupNotFoundError { + return PolicyGroupNotFoundError{ + name: name, + } +} + +// PolicyGroupExistsError is returned when a policy group creation request is sent to +// a cluster where that name is already in use. +type PolicyGroupExistsError struct { + name string +} + +// Error returns an error message indicating that a policy group name is already in +// use. +func (e PolicyGroupExistsError) Error() string { + return fmt.Sprintf("policy group name %s is already in use", e.name) +} + +// NewPolicyGroupExistsError returns an error indicating that a policy group with +// that name already exists. +func NewPolicyGroupExistsError(name string) PolicyGroupExistsError { + return PolicyGroupExistsError{ + name: name, + } +} + +// InvalidPolicyGroupCreationError is returned when a policy group creation +// request sent to the StorageOS API is invalid. +type InvalidPolicyGroupCreationError struct { + details string +} + +// Error returns an error message indicating that a policy group creation +// request made to the StorageOS API is invalid, including details if available. +func (e InvalidPolicyGroupCreationError) Error() string { + msg := "policy group creation request is invalid" + if e.details != "" { + msg = fmt.Sprintf("%v: %v", msg, e.details) + } + return msg +} + +// NewInvalidPolicyGroupCreationError returns an InvalidPolicyGroupCreationError, +// using details to provide information about what must be corrected. +func NewInvalidPolicyGroupCreationError(details string) InvalidPolicyGroupCreationError { + return InvalidPolicyGroupCreationError{ + details: details, + } +} + +type NodeNotFoundError struct { + uid string + name string +} + +func (e NodeNotFoundError) Error() string { + switch { + case e.uid != "": + return fmt.Sprintf("node with ID %v not found", e.uid) + case e.name != "": + return fmt.Sprintf("node with name %v not found", e.name) + } + + return "node not found" +} + +func NewNodeNotFoundError(uid string) NodeNotFoundError { + return NodeNotFoundError{ + uid: uid, + } +} + +func NewNodeNameNotFoundError(name string) NodeNotFoundError { + return NodeNotFoundError{ + name: name, + } +} + +// NamespaceExistsError is returned when a namespace creation request is sent to +// a cluster where that name is already in use. 
+type NamespaceExistsError struct { + name string +} + +func (e NamespaceExistsError) Error() string { + return fmt.Sprintf("namespace name %s is already in use", e.name) +} + +func NewNamespaceExistsError(name string) NamespaceExistsError { + return NamespaceExistsError{ + name: name, + } +} + +type InvalidNamespaceCreationError struct { + details string +} + +// Error returns an error message indicating that a namespace creation request +// made to the StorageOS API is invalid, including details if available. +func (e InvalidNamespaceCreationError) Error() string { + msg := "namespace creation request is invalid" + if e.details != "" { + msg = fmt.Sprintf("%v: %v", msg, e.details) + } + return msg +} + +// NewInvalidNamespaceCreationError returns an InvalidNamespaceCreationError, +// using details to provide information about what must be corrected. +func NewInvalidNamespaceCreationError(details string) InvalidNamespaceCreationError { + return InvalidNamespaceCreationError{ + details: details, + } +} + +type NamespaceNotFoundError struct { + uid string + name string +} + +func (e NamespaceNotFoundError) Error() string { + switch { + case e.uid != "": + return fmt.Sprintf("namespace with ID %v not found", e.uid) + case e.name != "": + return fmt.Sprintf("namespace with name %v not found", e.name) + } + + return "namespace not found" +} + +func NewNamespaceNotFoundError(uid string) NamespaceNotFoundError { + return NamespaceNotFoundError{ + uid: uid, + } +} + +func NewNamespaceNameNotFoundError(name string) NamespaceNotFoundError { + return NamespaceNotFoundError{ + name: name, + } +} + +// StaleWriteError indicates that the target resource for the requested +// operation has been concurrently updated, invalidating the request. The client +// should fetch the latest version of the resource before attempting to perform +// another update. +type StaleWriteError struct { + msg string +} + +func (e StaleWriteError) Error() string { + if e.msg == "" { + return "stale write attempted" + } + return e.msg +} + +func NewStaleWriteError(msg string) StaleWriteError { + return StaleWriteError{ + msg: msg, + } +} + +// LicenceCapabilityError indicates that the requested operation cannot be +// carried out due to a licensing issue with the cluster. +type LicenceCapabilityError struct { + msg string +} + +func (e LicenceCapabilityError) Error() string { + if e.msg == "" { + return "licence capability error" + } + return e.msg +} + +func NewLicenceCapabilityError(msg string) LicenceCapabilityError { + return LicenceCapabilityError{ + msg: msg, + } +} + +// InvalidStateTransitionError indicates that the requested operation cannot +// be performed for the target resource in its current state. +type InvalidStateTransitionError struct { + msg string +} + +func (e InvalidStateTransitionError) Error() string { + if e.msg == "" { + return "target resource is in an invalid state for carrying out the request" + } + return e.msg +} + +func NewInvalidStateTransitionError(msg string) InvalidStateTransitionError { + return InvalidStateTransitionError{ + msg: msg, + } +} + +// LockedError indicates that the requested operation cannot be performed +// because a lock is held for the target resource. 
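Stale writes are expected under concurrent use, so callers typically re-read and retry. A sketch of that pattern follows; the bounded-attempts policy is an assumption of the example, not something this package prescribes.

```go
package example

import (
	"context"
	"errors"

	"github.com/storageos/kubectl-storageos/api_client/openapi"
)

// retryOnStaleWrite retries an update a bounded number of times when the API
// reports a stale write. The update callback is expected to re-read the
// resource (and its version) on every attempt.
func retryOnStaleWrite(ctx context.Context, attempts int, update func(context.Context) error) error {
	var err error
	for i := 0; i < attempts; i++ {
		err = update(ctx)

		var stale openapi.StaleWriteError
		if err == nil || !errors.As(err, &stale) {
			return err
		}
	}
	return err
}
```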
+type LockedError struct { + msg string +} + +func (e LockedError) Error() string { + if e.msg == "" { + return "requsted operation cannot be safely completed as the target resource is locked" + } + return e.msg +} + +func NewLockedError(msg string) LockedError { + return LockedError{ + msg: msg, + } +} + +// ServerError indicates that an unrecoverable error occurred while attempting +// to perform the requested operation. +type ServerError struct { + msg string +} + +func (e ServerError) Error() string { + if e.msg == "" { + return "server encountered internal error" + } + return e.msg +} + +func NewServerError(msg string) ServerError { + return ServerError{ + msg: msg, + } +} + +// StoreError indicates that the requested operation could not be performed due +// to a store outage. +type StoreError struct { + msg string +} + +func (e StoreError) Error() string { + if e.msg == "" { + return "server encountered store outage" + } + return e.msg +} + +func NewStoreError(msg string) StoreError { + return StoreError{ + msg: msg, + } +} + +// UnauthorisedError indicates that the requested operation is disallowed +// for the user which the client is authenticated as. +type UnauthorisedError struct { + msg string +} + +func (e UnauthorisedError) Error() string { + if e.msg == "" { + return "authenticated user is not authorised to perform that action" + } + return e.msg +} + +func NewUnauthorisedError(msg string) UnauthorisedError { + return UnauthorisedError{ + msg: msg, + } +} + +// AuthenticationError indicates that the requested operation could not be +// performed for the client due to an issue with the authentication credentials +// provided by the client. +type AuthenticationError struct { + msg string +} + +func (e AuthenticationError) Error() string { + if e.msg == "" { + return "authentication error" + } + return e.msg +} + +func NewAuthenticationError(msg string) AuthenticationError { + return AuthenticationError{ + msg: msg, + } +} + +type IncompleteDiagnosticsError struct { + bundleReadCloser io.ReadCloser + bundleName string +} + +func (e IncompleteDiagnosticsError) Error() string { + return "received an incomplete diagnostic bundle" +} + +// BundleReadCloser returns the read closer for the bundle data associated with the error. +func (e IncompleteDiagnosticsError) BundleReadCloser() (io.ReadCloser, string) { + return e.bundleReadCloser, e.bundleName +} + +// NewIncompleteDiagnosticsError constructs an incomplete diagnostics error for +// the provided bundle read closer. +func NewIncompleteDiagnosticsError(bundleReadCloser io.ReadCloser, bundleName string) IncompleteDiagnosticsError { + return IncompleteDiagnosticsError{ + bundleReadCloser: bundleReadCloser, + bundleName: bundleName, + } +} + +// EncodingError provides a unified error type which transport encoding +// implementations return when given a value that cannot be encoded with the +// target encoding. +type EncodingError struct { + err error + targetType interface{} + value interface{} +} + +func (e EncodingError) Error() string { + return fmt.Sprintf("cannot encode %v as %T: %s", e.value, e.targetType, e.err) +} + +// NewEncodingError wraps err as an encoding error for value into targetType. +func NewEncodingError(err error, targetType, value interface{}) EncodingError { + return EncodingError{ + err: err, + targetType: targetType, + value: value, + } +} + +// badRequestError indicates that the request made by the client is invalid. 
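IncompleteDiagnosticsError deliberately carries the partial payload so it can still be saved. A sketch of how a caller could salvage it (illustrative only):

```go
package example

import (
	"errors"
	"io"

	"github.com/storageos/kubectl-storageos/api_client/openapi"
)

// partialBundle returns the partial diagnostic payload and its suggested file
// name when err wraps an incomplete-diagnostics condition.
func partialBundle(err error) (io.ReadCloser, string, bool) {
	var incomplete openapi.IncompleteDiagnosticsError
	if errors.As(err, &incomplete) {
		rc, name := incomplete.BundleReadCloser()
		return rc, name, true
	}
	return nil, "", false
}
```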
+type badRequestError struct { + msg string +} + +func (e badRequestError) Error() string { + if e.msg == "" { + return "bad request" + } + return e.msg +} + +func newBadRequestError(msg string) badRequestError { + return badRequestError{ + msg: msg, + } +} + +// notFoundError indicates that a resource involved in carrying out the API +// request was not found. +type notFoundError struct { + msg string +} + +func (e notFoundError) Error() string { + if e.msg == "" { + return "not found" + } + return e.msg +} + +func newNotFoundError(msg string) notFoundError { + return notFoundError{ + msg: msg, + } +} + +// conflictError indicates that the requested operation could not be carried +// out due to a conflict between the current state and the desired state. +type conflictError struct { + msg string +} + +func (e conflictError) Error() string { + if e.msg == "" { + return "conflict" + } + return e.msg +} + +func newConflictError(msg string) conflictError { + return conflictError{ + msg: msg, + } +} + +type openAPIError struct { + inner openapi.Error +} + +func (e openAPIError) Error() string { + return e.inner.Error +} + +func newOpenAPIError(err openapi.Error) openAPIError { + return openAPIError{ + inner: err, + } +} + +// mapOpenAPIError will given err and its corresponding resp attempt to map the +// HTTP error to an application level error. +// +// err is returned as is when any of the following are true: +// +// → resp is nil +// → err is not a GenericOpenAPIError or the unexported openAPIError +// +// Some response codes must be mapped by the caller in order to provide useful +// application level errors: +// +// → http.StatusBadRequest returns a badRequestError, which must have a 1-to-1 +// mapping to a context specific application error +// → http.StatusNotFound returns a notFoundError, which must have a 1-to-1 +// mapping to a context specific application error +// → http.StatusConflict returns a conflictError which must have a 1-to-1 +// mapping to a context specific application error +func mapOpenAPIError(err error, resp *http.Response) error { + if resp == nil { + return err + } + + var details string + switch v := err.(type) { + case openapi.GenericOpenAPIError: + switch model := v.Model().(type) { + case openapi.Error: + details = model.Error + default: + details = string(v.Body()) + } + case openAPIError: + details = v.Error() + default: + return err + } + + switch resp.StatusCode { + + // 4XX + case http.StatusBadRequest: + return newBadRequestError(details) + + case http.StatusUnauthorized: + return NewAuthenticationError(details) + + case http.StatusForbidden: + return NewUnauthorisedError(details) + + case http.StatusNotFound: + return newNotFoundError(details) + + case http.StatusConflict: + return newConflictError(details) + + case http.StatusPreconditionFailed: + return NewStaleWriteError(details) + + case http.StatusUnprocessableEntity: + return NewInvalidStateTransitionError(details) + + case http.StatusLocked: + return NewLockedError(details) + + // TODO(CP-3925): This may need changing to present a friendly error, or + // it may be done up the call stack. + case http.StatusUnavailableForLegalReasons: + return NewLicenceCapabilityError(details) + + // 5XX + case http.StatusInternalServerError: + return NewServerError(details) + + case http.StatusServiceUnavailable: + return NewStoreError(details) + + default: + // If details were obtained from the error, decorate it - even when + // unknown. 
+ if details != "" {
+ err = fmt.Errorf("%w: %v", err, details)
+ }
+ return err
+ }
+}
diff --git a/api_client/openapi/licence.go b/api_client/openapi/licence.go
new file mode 100644
index 00000000..430fdd0b
--- /dev/null
+++ b/api_client/openapi/licence.go
@@ -0,0 +1,56 @@
+package openapi
+
+import (
+ "context"
+
+ "github.com/antihax/optional"
+
+ openapi "github.com/storageos/go-api/autogenerated"
+ "github.com/storageos/kubectl-storageos/model"
+)
+
+// UpdateLicenceRequestParams contains optional request parameters for an update
+// licence operation.
+type UpdateLicenceRequestParams struct {
+ CASVersion string
+}
+
+func (o *OpenAPI) GetLicence(ctx context.Context) (*model.License, error) {
+ o.mu.RLock()
+ defer o.mu.RUnlock()
+
+ model, resp, err := o.client.DefaultApi.GetLicence(ctx)
+ if err != nil {
+ return nil, mapOpenAPIError(err, resp)
+ }
+
+ return DecodeLicence(model)
+}
+
+// UpdateLicence sends a new version of the licence to apply to the current
+// cluster. It returns the new licence resource if correctly applied.
+func (o *OpenAPI) UpdateLicence(ctx context.Context, licence []byte, params *UpdateLicenceRequestParams) (*model.License, error) {
+ o.mu.RLock()
+ defer o.mu.RUnlock()
+
+ // default
+ req := openapi.UpdateLicence{
+ Key: string(licence),
+ }
+ opts := &openapi.UpdateLicenceOpts{
+ IgnoreVersion: optional.NewBool(true),
+ }
+
+ // check optional params
+ if params != nil && params.CASVersion != "" {
+ req.Version = params.CASVersion
+ opts.IgnoreVersion = optional.NewBool(false)
+ }
+
+ lic, resp, err := o.client.DefaultApi.UpdateLicence(ctx, req, opts)
+ if err != nil {
+ return nil, mapOpenAPIError(err, resp)
+ }
+
+ return DecodeLicence(lic)
+}
diff --git a/api_client/openapi/namespace.go b/api_client/openapi/namespace.go
new file mode 100644
index 00000000..d4e4cb3f
--- /dev/null
+++ b/api_client/openapi/namespace.go
@@ -0,0 +1,119 @@
+package openapi
+
+import (
+ "context"
+
+ "github.com/antihax/optional"
+
+ openapi "github.com/storageos/go-api/autogenerated"
+ "github.com/storageos/kubectl-storageos/model"
+)
+
+// DeleteNamespaceRequestParams contains optional request parameters for a
+// delete namespace operation.
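The *RequestParams structs above follow a common pattern: a nil value (or an empty CASVersion) yields an unconditional request, while a populated CASVersion makes the operation conditional on the entity version held by the server. A usage sketch for UpdateLicence; the applyLicence helper and the file-path handling are assumptions for illustration.

package example

import (
	"context"
	"os"

	"github.com/storageos/kubectl-storageos/api_client/openapi"
)

// applyLicence reads a licence key from disk and applies it. With an empty
// casVersion the update is unconditional; otherwise it only succeeds if the
// server still holds that version of the licence.
func applyLicence(ctx context.Context, api *openapi.OpenAPI, path, casVersion string) error {
	key, err := os.ReadFile(path)
	if err != nil {
		return err
	}

	var params *openapi.UpdateLicenceRequestParams
	if casVersion != "" {
		params = &openapi.UpdateLicenceRequestParams{CASVersion: casVersion}
	}

	_, err = api.UpdateLicence(ctx, key, params)
	return err
}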
+type DeleteNamespaceRequestParams struct { + CASVersion string +} + +func (o *OpenAPI) GetNamespace(ctx context.Context, uid string) (*model.Namespace, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + model, resp, err := o.client.DefaultApi.GetNamespace(ctx, uid) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return nil, NewNamespaceNotFoundError(uid) + default: + return nil, v + } + } + + return DecodeNamespace(model) +} + +func (o *OpenAPI) ListNamespaces(ctx context.Context) ([]*model.Namespace, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + models, resp, err := o.client.DefaultApi.ListNamespaces(ctx) + if err != nil { + return nil, mapOpenAPIError(err, resp) + } + + namespaces := make([]*model.Namespace, len(models)) + for i, m := range models { + ns, err := DecodeNamespace(m) + if err != nil { + return nil, err + } + + namespaces[i] = ns + } + + return namespaces, nil +} + +func (o *OpenAPI) CreateNamespace(ctx context.Context, name string, labels map[string]string) (*model.Namespace, error) { + createData := openapi.CreateNamespaceData{ + Name: name, + Labels: labels, + } + + model, resp, err := o.client.DefaultApi.CreateNamespace(ctx, createData) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case badRequestError: + return nil, NewInvalidNamespaceCreationError(v.msg) + case conflictError: + return nil, NewNamespaceExistsError(name) + default: + return nil, v + } + } + + return DecodeNamespace(model) +} + +// DeleteNamespace makes a delete request for a namespace given its ID. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the delete request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +func (o *OpenAPI) DeleteNamespace(ctx context.Context, uid string, params *DeleteNamespaceRequestParams) error { + o.mu.RLock() + defer o.mu.RUnlock() + + var casVersion string + var ignoreVersion optional.Bool = optional.NewBool(true) + + if params != nil { + if params.CASVersion != "" { + ignoreVersion = optional.NewBool(false) + casVersion = params.CASVersion + } + } + + resp, err := o.client.DefaultApi.DeleteNamespace( + ctx, + uid, + casVersion, + &openapi.DeleteNamespaceOpts{ + IgnoreVersion: ignoreVersion, + }, + ) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return NewNamespaceNotFoundError(uid) + default: + return v + } + } + + return nil +} diff --git a/api_client/openapi/node.go b/api_client/openapi/node.go new file mode 100644 index 00000000..1a51baa3 --- /dev/null +++ b/api_client/openapi/node.go @@ -0,0 +1,170 @@ +package openapi + +import ( + "context" + "time" + + "github.com/antihax/optional" + + openapi "github.com/storageos/go-api/autogenerated" + "github.com/storageos/kubectl-storageos/model" +) + +// DeleteNodeRequestParams contains optional request parameters for a delete +// node operation. 
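The namespace helpers map the transport-level notFoundError back to NamespaceNotFoundError, which lets callers write idempotent clean-up code. A sketch under the assumption that model.Namespace exposes Name and ID fields; the deleteNamespaceByName helper is illustrative only.

package example

import (
	"context"
	"errors"

	"github.com/storageos/kubectl-storageos/api_client/openapi"
)

// deleteNamespaceByName resolves a namespace by listing and matching on its
// name, then issues an unconditional delete (nil params). A namespace that
// has already disappeared is treated as success.
func deleteNamespaceByName(ctx context.Context, api *openapi.OpenAPI, name string) error {
	namespaces, err := api.ListNamespaces(ctx)
	if err != nil {
		return err
	}

	for _, ns := range namespaces {
		if ns.Name != name {
			continue
		}

		err := api.DeleteNamespace(ctx, ns.ID, nil)

		var notFound openapi.NamespaceNotFoundError
		if errors.As(err, &notFound) {
			return nil
		}
		return err
	}

	return openapi.NewNamespaceNameNotFoundError(name)
}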
+type DeleteNodeRequestParams struct { + CASVersion string + AsyncMax time.Duration +} + +// SetCordonedRequestParams contains the required and optional parameteres +// for a set cordoned operation +type SetCordonedRequestParams struct { + Cordoned bool + CASVersion string +} + +func (o *OpenAPI) GetNode(ctx context.Context, uid string) (*model.Node, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + model, resp, err := o.client.DefaultApi.GetNode(ctx, uid) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return nil, NewNodeNotFoundError(uid) + default: + return nil, v + } + } + + return DecodeNode(model) +} + +func (o *OpenAPI) ListNodes(ctx context.Context) ([]*model.Node, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + models, resp, err := o.client.DefaultApi.ListNodes(ctx) + if err != nil { + return nil, mapOpenAPIError(err, resp) + } + + nodes := make([]*model.Node, len(models)) + for i, m := range models { + n, err := DecodeNode(m) + if err != nil { + return nil, err + } + + nodes[i] = n + } + + return nodes, nil +} + +// DeleteNode makes a delete request for nodeID. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the delete request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the node entity's version as seen by the server. +// +// Asynchrony: +// - If params is nil or params.AsyncMax is empty/zero valued then the delete +// request is performed synchronously. +// - If params.AsyncMax is set, the request is performed asynchronously using +// the duration given as the maximum amount of time allowed for the request +// before it times out. +func (o *OpenAPI) DeleteNode(ctx context.Context, nodeID string, params *DeleteNodeRequestParams) error { + o.mu.RLock() + defer o.mu.RUnlock() + + var casVersion string + var ignoreVersion optional.Bool = optional.NewBool(true) + var asyncMax optional.String = optional.EmptyString() + + if params != nil { + if params.CASVersion != "" { + ignoreVersion = optional.NewBool(false) + casVersion = params.CASVersion + } + + if params.AsyncMax != 0 { + asyncMax = optional.NewString(params.AsyncMax.String()) + } + } + + resp, err := o.client.DefaultApi.DeleteNode( + ctx, + nodeID, + casVersion, + &openapi.DeleteNodeOpts{ + IgnoreVersion: ignoreVersion, + AsyncMax: asyncMax, + }, + ) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return NewNodeNotFoundError(nodeID) + default: + return v + } + } + + return nil +} + +// SetCordoned makes a set cordoned request for nodeID. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the delete request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the node entity's version as seen by the server. 
+// +// Cordoned: +// - If true marks the node as cordoned +// - If false marks the node as not cordoned +func (o *OpenAPI) SetCordoned(ctx context.Context, nodeID string, params *SetCordonedRequestParams) error { + o.mu.RLock() + defer o.mu.RUnlock() + + var casVersion string + var ignoreVersion optional.Bool = optional.NewBool(true) + + if params != nil { + if params.CASVersion != "" { + ignoreVersion = optional.NewBool(false) + casVersion = params.CASVersion + } + } + _, resp, err := o.client.DefaultApi.SetCordoned( + ctx, + nodeID, + openapi.SetCordonedNodeData{ + Cordoned: params.Cordoned, + Version: casVersion, + }, + &openapi.SetCordonedOpts{ + IgnoreVersion: ignoreVersion, + }, + ) + + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return NewNodeNotFoundError(nodeID) + default: + return v + } + } + + return nil +} diff --git a/api_client/openapi/openapi.go b/api_client/openapi/openapi.go new file mode 100644 index 00000000..96a1b1ad --- /dev/null +++ b/api_client/openapi/openapi.go @@ -0,0 +1,121 @@ +package openapi + +import ( + "context" + "errors" + "math" + "strings" + "sync" + "time" + + openapi "github.com/storageos/go-api/autogenerated" +) + +// OpenAPI provides functionality for consuming the REST API exposed by +// StorageOS, implemented with a client generated from the OpenAPI +// specification. +// +// The codec stored on the type is responsible for translating the returned +// OpenAPI models into the internal types. +type OpenAPI struct { + mu *sync.RWMutex + + client *openapi.APIClient +} + +// Authenticate attempts to authenticate against the target API using username +// and password. If successful, o's underlying OpenAPI client will use the +// returned token in the Authorization header for future operations. +// +// Returns the session token and when it expires. +func (o *OpenAPI) Authenticate(ctx context.Context, username, password string) (string, time.Time, error) { + o.mu.Lock() + defer o.mu.Unlock() + + userSession, resp, err := o.client.DefaultApi.AuthenticateUser( + ctx, + openapi.AuthUserData{ + Username: username, + Password: password, + }, + ) + if err != nil { + return "", time.Now().Add(-time.Minute), mapOpenAPIError(err, resp) + } + + token := userSession.Session.Token + // If the token was not decoded from the response body then check the header. + if token == "" { + token = strings.TrimPrefix(resp.Header.Get("Authorization"), "Bearer ") + } + + // Set the authorization header to use the token. + o.client.GetConfig().AddDefaultHeader("Authorization", token) + + var expiresIn time.Duration + + if userSession.Session.ExpiresInSeconds >= uint64(math.MaxInt64/time.Second) { + expiresIn = math.MaxInt64 + } else { + expiresIn = time.Duration(userSession.Session.ExpiresInSeconds) * time.Second + } + + return token, time.Now().Add(expiresIn), nil +} + +// UseAuthSession configures o to use the provided authentication session for +// future requests. Session must contain a non-empty token, but no clock based +// checks are performed. +func (o *OpenAPI) UseAuthSession(ctx context.Context, sessionToken string) error { + o.mu.Lock() + defer o.mu.Unlock() + + if sessionToken == "" { + return errors.New("auth session has no token") + } + + // Set the authorization header to use the token. + o.client.GetConfig().AddDefaultHeader("Authorization", sessionToken) + return nil +} + +// NewOpenAPI initialises a new OpenAPI transport using config to source the +// target host endpoints and userAgent as the HTTP user agent string. 
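A short sketch of the session flow described above: authenticate once, then hand the returned token to another client with UseAuthSession instead of re-sending credentials. The endpoint, user agent and credentials are placeholders; NewOpenAPI (below) accepts endpoints with or without a scheme, defaulting to http.

package main

import (
	"context"
	"log"

	"github.com/storageos/kubectl-storageos/api_client/openapi"
)

func main() {
	ctx := context.Background()

	api, err := openapi.NewOpenAPI([]string{"http://storageos.storageos.svc:5705"}, "example/0.0.0")
	if err != nil {
		log.Fatal(err)
	}

	token, expiresAt, err := api.Authenticate(ctx, "storageos", "storageos")
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("session valid until %s", expiresAt)

	// A second client adopts the same session without re-authenticating.
	other, err := openapi.NewOpenAPI([]string{"storageos.storageos.svc:5705"}, "example/0.0.0")
	if err != nil {
		log.Fatal(err)
	}
	if err := other.UseAuthSession(ctx, token); err != nil {
		log.Fatal(err)
	}
}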
+func NewOpenAPI(hosts []string, userAgent string) (*OpenAPI, error) { + if len(hosts) == 0 { + return nil, errors.New("unable to determine target host") + } + + // TODO(CP-3924): This is not good - fix how we get API endpoints from the config. + // This should be done as part of the work in supporting multiple endpoints. + parts := strings.Split(hosts[0], "://") + switch len(parts) { + case 1: + parts = []string{"http", parts[0]} + case 2: + default: + return nil, errors.New("unable to parse target host") + } + + // Create the OpenAPI client configuration + // and initialise. + apiCfg := &openapi.Configuration{ + BasePath: "v2", + DefaultHeader: map[string]string{}, + // TODO(CP-3924): For now the CLI supports only sending requests to the + // first host provided. There should be a way to utilise multiple + // hosts. + Host: parts[1], + // TODO(CP-3913): Support TLS. + Scheme: parts[0], + UserAgent: userAgent, + } + + client := openapi.NewAPIClient(apiCfg) + + return &OpenAPI{ + mu: &sync.RWMutex{}, + + client: client, + }, nil +} diff --git a/api_client/openapi/policygroup.go b/api_client/openapi/policygroup.go new file mode 100644 index 00000000..f61aeb5b --- /dev/null +++ b/api_client/openapi/policygroup.go @@ -0,0 +1,128 @@ +package openapi + +import ( + "context" + + "github.com/antihax/optional" + + openapi "github.com/storageos/go-api/autogenerated" + "github.com/storageos/kubectl-storageos/model" +) + +// DeletePolicyGroupRequestParams contains optional request parameters for a +// delete policy group operation. +type DeletePolicyGroupRequestParams struct { + CASVersion string +} + +func (o *OpenAPI) CreatePolicyGroup(ctx context.Context, name string, specs []*model.PolicyGroupSpec) (*model.PolicyGroup, error) { + slice := make([]openapi.PoliciesSpecs, 0, len(specs)) + for _, s := range specs { + slice = append(slice, openapi.PoliciesSpecs{ + NamespaceID: s.NamespaceID, + ResourceType: s.ResourceType, + ReadOnly: s.ReadOnly, + }) + } + + createData := openapi.CreatePolicyGroupData{ + Name: name, + Specs: &slice, + } + + model, resp, err := o.client.DefaultApi.CreatePolicyGroup(ctx, createData) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case badRequestError: + return nil, NewInvalidPolicyGroupCreationError(v.msg) + case conflictError: + return nil, NewPolicyGroupExistsError(name) + default: + return nil, v + } + } + + return DecodePolicyGroup(model) +} + +func (o *OpenAPI) GetPolicyGroup(ctx context.Context, uid string) (*model.PolicyGroup, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + model, resp, err := o.client.DefaultApi.GetPolicyGroup(ctx, uid) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return nil, NewPolicyGroupIDNotFoundError(uid) + default: + return nil, v + } + } + + return DecodePolicyGroup(model) +} + +func (o *OpenAPI) ListPolicyGroups(ctx context.Context) ([]*model.PolicyGroup, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + models, resp, err := o.client.DefaultApi.ListPolicyGroups(ctx) + if err != nil { + return nil, mapOpenAPIError(err, resp) + } + + policyGroups := make([]*model.PolicyGroup, 0, len(models)) + for _, m := range models { + g, err := DecodePolicyGroup(m) + if err != nil { + return nil, err + } + + policyGroups = append(policyGroups, g) + } + + return policyGroups, nil +} + +// DeletePolicyGroup makes a delete request for a policy group given its ID. 
+// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the delete request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +func (o *OpenAPI) DeletePolicyGroup(ctx context.Context, uid string, params *DeletePolicyGroupRequestParams) error { + o.mu.RLock() + defer o.mu.RUnlock() + + var casVersion string + var ignoreVersion optional.Bool = optional.NewBool(true) + + if params != nil { + if params.CASVersion != "" { + ignoreVersion = optional.NewBool(false) + casVersion = params.CASVersion + } + } + + resp, err := o.client.DefaultApi.DeletePolicyGroup( + ctx, + uid, + casVersion, + &openapi.DeletePolicyGroupOpts{ + IgnoreVersion: ignoreVersion, + }, + ) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return NewPolicyGroupIDNotFoundError(uid) + default: + return v + } + } + + return nil +} diff --git a/api_client/openapi/user.go b/api_client/openapi/user.go new file mode 100644 index 00000000..58752a42 --- /dev/null +++ b/api_client/openapi/user.go @@ -0,0 +1,122 @@ +package openapi + +import ( + "context" + + "github.com/antihax/optional" + + openapi "github.com/storageos/go-api/autogenerated" + "github.com/storageos/kubectl-storageos/model" +) + +// DeleteUserRequestParams contains optional request parameters for a +// delete user operation. +type DeleteUserRequestParams struct { + CASVersion string +} + +func (o *OpenAPI) CreateUser(ctx context.Context, username, password string, withAdmin bool, groups ...string) (*model.User, error) { + + gs := make([]string, len(groups)) + copy(gs, groups) + + createData := openapi.CreateUserData{ + Username: username, + Password: password, + IsAdmin: withAdmin, + Groups: &gs, + } + + model, resp, err := o.client.DefaultApi.CreateUser(ctx, createData) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case badRequestError: + return nil, NewInvalidUserCreationError(v.msg) + case conflictError: + return nil, NewUserExistsError(username) + default: + return nil, v + } + } + + return DecodeUser(model) +} + +func (o *OpenAPI) GetUser(ctx context.Context, uID string) (*model.User, error) { + model, resp, err := o.client.DefaultApi.GetUser(ctx, uID) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return nil, NewUserNotFoundError(v.msg, uID) + default: + return nil, v + } + } + + return DecodeUser(model) +} + +func (o *OpenAPI) ListUsers(ctx context.Context) ([]*model.User, error) { + list, resp, err := o.client.DefaultApi.ListUsers(ctx) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + default: + return nil, v + } + } + + users := make([]*model.User, 0, len(list)) + + for _, u := range list { + u, err := DecodeUser(u) + if err != nil { + return nil, err + } + users = append(users, u) + } + + return users, nil +} + +// DeleteUser makes a delete request for a user given its ID. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the delete request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. 
+func (o *OpenAPI) DeleteUser(ctx context.Context, uid string, params *DeleteUserRequestParams) error { + o.mu.RLock() + defer o.mu.RUnlock() + + var casVersion string + var ignoreVersion optional.Bool = optional.NewBool(true) + + if params != nil { + if params.CASVersion != "" { + ignoreVersion = optional.NewBool(false) + casVersion = params.CASVersion + } + } + + resp, err := o.client.DefaultApi.DeleteUser( + ctx, + uid, + casVersion, + &openapi.DeleteUserOpts{ + IgnoreVersion: ignoreVersion, + }, + ) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return NewUserNotFoundError(v.msg, uid) + default: + return v + } + } + + return nil +} diff --git a/api_client/openapi/volume.go b/api_client/openapi/volume.go new file mode 100644 index 00000000..ebfdc0b1 --- /dev/null +++ b/api_client/openapi/volume.go @@ -0,0 +1,808 @@ +package openapi + +import ( + "context" + "time" + + "github.com/antihax/optional" + + openapi "github.com/storageos/go-api/autogenerated" + "github.com/storageos/kubectl-storageos/model" +) + +// CreateVolumeRequestParams contains optional request parameters for a create +// volume operation. +type CreateVolumeRequestParams struct { + AsyncMax time.Duration +} + +// DeleteVolumeRequestParams contains optional request parameters for a delete +// volume operation. +type DeleteVolumeRequestParams struct { + CASVersion string + AsyncMax time.Duration + OfflineDelete bool +} + +// AttachNFSVolumeRequestParams contains optional request parameters for an +// Attach NFS volume operation. +type AttachNFSVolumeRequestParams struct { + CASVersion string + AsyncMax time.Duration +} + +// UpdateNFSVolumeExportsRequestParams contains optional request parameters for +// an Update NFS volume exports operation. +type UpdateNFSVolumeExportsRequestParams struct { + CASVersion string + AsyncMax time.Duration +} + +// UpdateNFSVolumeMountEndpointRequestParams contains optional request +// parameters for an Update NFS volume mount endpoint operation. +type UpdateNFSVolumeMountEndpointRequestParams struct { + CASVersion string + AsyncMax time.Duration +} + +// DetachVolumeRequestParams contains optional request parameters for a detach +// volume operation. +type DetachVolumeRequestParams struct { + CASVersion string + AsyncMax time.Duration +} + +// SetReplicasRequestParams contains optional request parameters for a set +// replicas volume operation. +type SetReplicasRequestParams struct { + CASVersion string +} + +// UpdateVolumeRequestParams contains optional request parameters for a set +// description or set labels volume operation. +type UpdateVolumeRequestParams struct { + CASVersion string + AsyncMax time.Duration +} + +// AddDeploymentOnNodeRequestParams defines the body of the request for this +// operation. +type AddDeploymentOnNodeRequestParams struct { + NodeID string + CASVersion string +} + +// SetPreferredEvictionCandidatesRequestParams defines the body of the request +// for this operation. +type SetPreferredEvictionCandidatesRequestParams struct { + DeploymentIDs []string + CASVersion string +} + +// ResizeVolumeRequestParams contains request parameters for a resize +// volume operation. +type ResizeVolumeRequestParams struct { + AsyncMax time.Duration + CASVersion string +} + +// SetFailureModeRequestParams contains request parameters for a set failure +// mode volume operation. 
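For completeness, a sketch of how the create-volume parameters above are typically filled in from the wrapped client; the namespace ID, size, labels and description are placeholders, and the filesystem type is passed through rather than constructed here.

package example

import (
	"context"
	"time"

	apiclient "github.com/storageos/kubectl-storageos/api_client"
	"github.com/storageos/kubectl-storageos/api_client/openapi"
	"github.com/storageos/kubectl-storageos/model"
)

// createExampleVolume provisions a 5 GiB volume and allows the request up to
// 30 seconds to complete asynchronously.
func createExampleVolume(ctx context.Context, c *apiclient.ClientWithReauth, nsID, name string, fs model.FsType) (*model.Volume, error) {
	return c.CreateVolume(
		ctx,
		nsID,
		name,
		"created by kubectl-storageos example", // description
		fs,
		5*1024*1024*1024, // 5 GiB in bytes
		map[string]string{"env": "example"},
		&openapi.CreateVolumeRequestParams{AsyncMax: 30 * time.Second},
	)
}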
+type SetFailureModeRequestParams struct { + CASVersion string +} + +// CreateVolume requests the creation of a new volume through the StorageOS API +// using the provided parameters. +// +// The behaviour of the operation is dictated by params: +// +// Asynchrony: +// - If params is nil or params.AsyncMax is empty/zero valued then the create +// request is performed synchronously. +// - If params.AsyncMax is set, the request is performed asynchronously using +// the duration given as the maximum amount of time allowed for the request +// before it times out. +func (o *OpenAPI) CreateVolume(ctx context.Context, namespace string, name, description string, fs model.FsType, sizeBytes uint64, labels map[string]string, params *CreateVolumeRequestParams) (*model.Volume, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + fsType, err := EncodeFsType(fs) + if err != nil { + return nil, err + } + + var asyncMax optional.String = optional.EmptyString() + + if params != nil { + if params.AsyncMax != 0 { + asyncMax = optional.NewString(params.AsyncMax.String()) + } + } + + createData := openapi.CreateVolumeData{ + NamespaceID: namespace, + Labels: labels, + Name: name, + FsType: fsType, + Description: description, + SizeBytes: sizeBytes, + } + + model, resp, err := o.client.DefaultApi.CreateVolume( + ctx, + namespace, + createData, + &openapi.CreateVolumeOpts{ + AsyncMax: asyncMax, + }, + ) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case badRequestError: + return nil, NewInvalidVolumeCreationError(v.msg) + case conflictError: + return nil, NewVolumeExistsError(name, namespace) + default: + return nil, v + } + } + + return DecodeVolume(model) +} + +func (o *OpenAPI) GetVolume(ctx context.Context, namespaceID string, uid string) (*model.Volume, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + model, resp, err := o.client.DefaultApi.GetVolume(ctx, namespaceID, uid) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return nil, NewVolumeNotFoundError(v.msg) + default: + return nil, v + } + } + + return DecodeVolume(model) +} + +func (o *OpenAPI) ListVolumes(ctx context.Context, namespaceID string) ([]*model.Volume, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + models, resp, err := o.client.DefaultApi.ListVolumes(ctx, namespaceID) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return nil, NewNamespaceNotFoundError(namespaceID) + default: + return nil, v + } + } + + volumes := make([]*model.Volume, len(models)) + for i, m := range models { + v, err := DecodeVolume(m) + if err != nil { + return nil, err + } + + volumes[i] = v + } + + return volumes, nil +} + +// DeleteVolume makes a delete request for volumeID in namespaceID. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the delete request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +// +// Asynchrony: +// - If params is nil or params.AsyncMax is empty/zero valued then the delete +// request is performed synchronously. +// - If params.AsyncMax is set, the request is performed asynchronously using +// the duration given as the maximum amount of time allowed for the request +// before it times out. 
+//
+// Offline deletion behaviour:
+// - If params is nil then offline deletion behaviour is not requested,
+// otherwise the value of params.OfflineDelete is used. The default
+// value of false reflects normal deletion behaviour, so it does not need setting
+// unless offline deletion behaviour is desired.
+func (o *OpenAPI) DeleteVolume(ctx context.Context, namespaceID string, volumeID string, params *DeleteVolumeRequestParams) error {
+ o.mu.RLock()
+ defer o.mu.RUnlock()
+
+ var casVersion string
+ var ignoreVersion optional.Bool = optional.NewBool(true)
+ var asyncMax optional.String = optional.EmptyString()
+ var offlineDelete optional.Bool = optional.NewBool(false)
+
+ if params != nil {
+ if params.CASVersion != "" {
+ ignoreVersion = optional.NewBool(false)
+ casVersion = params.CASVersion
+ }
+
+ if params.AsyncMax != 0 {
+ asyncMax = optional.NewString(params.AsyncMax.String())
+ }
+
+ offlineDelete = optional.NewBool(params.OfflineDelete)
+ }
+
+ resp, err := o.client.DefaultApi.DeleteVolume(
+ ctx,
+ namespaceID,
+ volumeID,
+ casVersion,
+ &openapi.DeleteVolumeOpts{
+ IgnoreVersion: ignoreVersion,
+ AsyncMax: asyncMax,
+ OfflineDelete: offlineDelete,
+ },
+ )
+ if err != nil {
+ switch v := mapOpenAPIError(err, resp).(type) {
+ case notFoundError:
+ return NewVolumeNotFoundError(v.msg)
+ default:
+ return v
+ }
+ }
+
+ return nil
+}
+
+// AttachVolume requests to attach the volume `volumeID` in the namespace
+// `namespaceID` to the node `nodeID`. It can return an error or nil if it
+// succeeds.
+func (o *OpenAPI) AttachVolume(ctx context.Context, namespaceID string, volumeID string, nodeID string) error {
+ o.mu.RLock()
+ defer o.mu.RUnlock()
+
+ resp, err := o.client.DefaultApi.AttachVolume(
+ ctx,
+ namespaceID,
+ volumeID,
+ openapi.AttachVolumeData{
+ NodeID: nodeID,
+ },
+ )
+ if err != nil {
+ switch v := mapOpenAPIError(err, resp).(type) {
+ case notFoundError:
+ return NewVolumeNotFoundError(v.msg)
+ default:
+ return v
+ }
+ }
+
+ return nil
+}
+
+// AttachNFSVolume requests to attach the NFS volume `volumeID` in the namespace
+// `namespaceID`. It can return an error or nil if it
+// succeeds.
+func (o *OpenAPI) AttachNFSVolume(ctx context.Context, namespaceID string, volumeID string, params *AttachNFSVolumeRequestParams) error {
+ o.mu.RLock()
+ defer o.mu.RUnlock()
+
+ var casVersion string
+ var ignoreVersion optional.Bool = optional.NewBool(true)
+ var asyncMax optional.String = optional.EmptyString()
+
+ if params != nil {
+ if params.CASVersion != "" {
+ ignoreVersion = optional.NewBool(false)
+ casVersion = params.CASVersion
+ }
+
+ if params.AsyncMax != 0 {
+ asyncMax = optional.NewString(params.AsyncMax.String())
+ }
+ }
+
+ resp, err := o.client.DefaultApi.AttachNFSVolume(
+ ctx,
+ namespaceID,
+ volumeID,
+ openapi.AttachNfsVolumeData{
+ Version: casVersion,
+ },
+ &openapi.AttachNFSVolumeOpts{
+ IgnoreVersion: ignoreVersion,
+ AsyncMax: asyncMax,
+ },
+ )
+ if err != nil {
+ switch v := mapOpenAPIError(err, resp).(type) {
+ case notFoundError:
+ return NewVolumeNotFoundError(v.msg)
+ default:
+ return v
+ }
+ }
+
+ return nil
+}
+
+// UpdateNFSVolumeExports requests to update the NFS volume exports of `volumeID`
+// in the namespace `namespaceID`. It can return an error or nil if it succeeds.
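A usage sketch showing how the optional delete parameters combine: a conditional (CAS) delete that may run asynchronously for up to a minute and is allowed to proceed while the volume's node is offline. The forceDeleteVolume helper is an illustration, not part of this change.

package example

import (
	"context"
	"time"

	"github.com/storageos/kubectl-storageos/api_client/openapi"
)

// forceDeleteVolume issues a version-checked, asynchronous, offline-capable
// volume deletion.
func forceDeleteVolume(ctx context.Context, api *openapi.OpenAPI, nsID, volID, version string) error {
	return api.DeleteVolume(ctx, nsID, volID, &openapi.DeleteVolumeRequestParams{
		CASVersion:    version,
		AsyncMax:      time.Minute,
		OfflineDelete: true,
	})
}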
+func (o *OpenAPI) UpdateNFSVolumeExports( + ctx context.Context, + namespaceID string, + volumeID string, + exports []model.NFSExportConfig, + params *UpdateNFSVolumeExportsRequestParams, +) error { + + o.mu.RLock() + defer o.mu.RUnlock() + + var casVersion string + var ignoreVersion optional.Bool = optional.NewBool(true) + var asyncMax optional.String = optional.EmptyString() + + if params != nil { + if params.CASVersion != "" { + ignoreVersion = optional.NewBool(false) + casVersion = params.CASVersion + } + + if params.AsyncMax != 0 { + asyncMax = optional.NewString(params.AsyncMax.String()) + } + } + + openapiExports := make([]openapi.NfsExportConfig, 0, len(exports)) + for _, e := range exports { + openapiExports = append(openapiExports, EncodeNFSExport(e)) + } + + resp, err := o.client.DefaultApi.UpdateNFSVolumeExports( + ctx, + namespaceID, + volumeID, + openapi.NfsVolumeExports{ + Exports: openapiExports, + Version: casVersion, + }, + &openapi.UpdateNFSVolumeExportsOpts{ + IgnoreVersion: ignoreVersion, + AsyncMax: asyncMax, + }, + ) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return NewVolumeNotFoundError(v.msg) + default: + return v + } + } + + return nil +} + +// UpdateNFSVolumeMountEndpoint request to update the NFS mount endpoint of +// `volumeID` in the namespace `namespaceID`. It can return an error or nil if +// it succeeds. +func (o *OpenAPI) UpdateNFSVolumeMountEndpoint( + ctx context.Context, + namespaceID string, + volumeID string, + endpoint string, + params *UpdateNFSVolumeMountEndpointRequestParams, +) error { + + o.mu.RLock() + defer o.mu.RUnlock() + + var casVersion string + var ignoreVersion optional.Bool = optional.NewBool(true) + var asyncMax optional.String = optional.EmptyString() + + if params != nil { + if params.CASVersion != "" { + ignoreVersion = optional.NewBool(false) + casVersion = params.CASVersion + } + + if params.AsyncMax != 0 { + asyncMax = optional.NewString(params.AsyncMax.String()) + } + } + + resp, err := o.client.DefaultApi.UpdateNFSVolumeMountEndpoint( + ctx, + namespaceID, + volumeID, + openapi.NfsVolumeMountEndpoint{ + MountEndpoint: endpoint, + Version: casVersion, + }, + &openapi.UpdateNFSVolumeMountEndpointOpts{ + IgnoreVersion: ignoreVersion, + AsyncMax: asyncMax, + }, + ) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return NewVolumeNotFoundError(v.msg) + default: + return v + } + } + + return nil +} + +func (o *OpenAPI) SetFailureModeIntent(ctx context.Context, namespaceID string, volumeID string, intent string, params *SetFailureModeRequestParams) (*model.Volume, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + partialRequest := openapi.SetFailureModeRequest{ + Mode: openapi.FailureModeIntent(intent), + } + + return o.setFailureMode(ctx, namespaceID, volumeID, partialRequest, params) +} + +func (o *OpenAPI) SetFailureThreshold(ctx context.Context, namespaceID string, volumeID string, threshold uint64, params *SetFailureModeRequestParams) (*model.Volume, error) { + o.mu.RLock() + defer o.mu.RUnlock() + + partialRequest := openapi.SetFailureModeRequest{ + FailureThreshold: threshold, + } + + return o.setFailureMode(ctx, namespaceID, volumeID, partialRequest, params) +} + +func (o *OpenAPI) setFailureMode(ctx context.Context, namespaceID string, volumeID string, partialRequest openapi.SetFailureModeRequest, params *SetFailureModeRequestParams) (*model.Volume, error) { + opts := &openapi.SetFailureModeOpts{ + IgnoreVersion: 
optional.NewBool(true), + } + + if params != nil && params.CASVersion != "" { + partialRequest.Version = params.CASVersion + opts.IgnoreVersion = optional.NewBool(false) + } + + model, resp, err := o.client.DefaultApi.SetFailureMode( + ctx, + namespaceID, + volumeID, + partialRequest, + opts, + ) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return nil, NewVolumeNotFoundError(v.msg) + default: + return nil, v + } + } + + return DecodeVolume(model) +} + +// DetachVolume makes a detach request for volumeID in namespaceID. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the detach request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +// +// Asynchrony: +// - If params is nil or params.AsyncMax is empty/zero valued then the delete +// request is performed synchronously. +// - If params.AsyncMax is set, the request is performed asynchronously using +// the duration given as the maximum amount of time allowed for the request +// before it times out. +func (o *OpenAPI) DetachVolume(ctx context.Context, namespaceID string, volumeID string, params *DetachVolumeRequestParams) error { + o.mu.RLock() + defer o.mu.RUnlock() + + var casVersion string + + opts := &openapi.DetachVolumeOpts{ + IgnoreVersion: optional.NewBool(true), + AsyncMax: optional.EmptyString(), + } + + // Set the CAS version constraint if provided + if params != nil { + if params.CASVersion != "" { + opts.IgnoreVersion = optional.NewBool(false) + casVersion = params.CASVersion + } + if params.AsyncMax != 0 { + opts.AsyncMax = optional.NewString(params.AsyncMax.String()) + } + } + + resp, err := o.client.DefaultApi.DetachVolume( + ctx, + namespaceID, + volumeID, + casVersion, + opts, + ) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return NewVolumeNotFoundError(v.msg) + default: + return v + } + } + + return nil +} + +// SetReplicas changes the number of the replicas of a specified volume. +// Operation is asynchronous, we return nil if the request has been accepted. +func (o *OpenAPI) SetReplicas(ctx context.Context, nsID string, volID string, numReplicas uint64, params *SetReplicasRequestParams) error { + + o.mu.RLock() + defer o.mu.RUnlock() + + // default + request := openapi.SetReplicasRequest{Replicas: numReplicas} + opts := &openapi.SetReplicasOpts{ + IgnoreVersion: optional.NewBool(true), + } + + // check optional params + if params != nil && params.CASVersion != "" { + request.Version = params.CASVersion + opts.IgnoreVersion = optional.NewBool(false) + } + + _, resp, err := o.client.DefaultApi.SetReplicas(ctx, nsID, volID, request, opts) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return NewVolumeNotFoundError(v.msg) + default: + return v + } + } + + return nil +} + +// UpdateVolume changes the description of a specified volume. +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the detach request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +// +// Asynchrony: +// - If params is nil or params.AsyncMax is empty/zero valued then the delete +// request is performed synchronously. 
+// - If params.AsyncMax is set, the request is performed asynchronously using +// the duration given as the maximum amount of time allowed for the request +// before it times out. +func (o *OpenAPI) UpdateVolume( + ctx context.Context, + nsID string, + volID string, + description string, + labels map[string]string, + params *UpdateVolumeRequestParams, +) (*model.Volume, error) { + + o.mu.RLock() + defer o.mu.RUnlock() + + // default + request := openapi.UpdateVolumeData{ + Labels: labels, + Description: description, + } + opts := &openapi.UpdateVolumeOpts{ + IgnoreVersion: optional.NewBool(true), + AsyncMax: optional.EmptyString(), + } + + // check optional params + if params != nil && params.CASVersion != "" { + request.Version = params.CASVersion + opts.IgnoreVersion = optional.NewBool(false) + } + + model, resp, err := o.client.DefaultApi.UpdateVolume(ctx, nsID, volID, request, opts) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return nil, NewVolumeNotFoundError(v.msg) + default: + return nil, v + } + } + + return DecodeVolume(model) +} + +// ResizeVolume changes the size of a specified volume. +// Operation is asynchronous, we return nil if the request has been accepted. +func (o *OpenAPI) ResizeVolume( + ctx context.Context, + nsID string, + volID string, + sizeBytes uint64, + params *ResizeVolumeRequestParams, +) (*model.Volume, error) { + + o.mu.RLock() + defer o.mu.RUnlock() + + // default + request := openapi.ResizeVolumeRequest{ + SizeBytes: sizeBytes, + } + opts := &openapi.ResizeVolumeOpts{ + AsyncMax: optional.EmptyString(), + IgnoreVersion: optional.Bool{}, + } + + // check optional params + if params != nil { + if params.AsyncMax != 0 { + opts.AsyncMax = optional.NewString(params.AsyncMax.String()) + } + + if params.CASVersion != "" { + request.Version = params.CASVersion + opts.IgnoreVersion = optional.NewBool(false) + } + + if params.AsyncMax != 0 { + opts.AsyncMax = optional.NewString(params.AsyncMax.String()) + } + } + + model, resp, err := o.client.DefaultApi.ResizeVolume(ctx, nsID, volID, request, opts) + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return nil, NewVolumeNotFoundError(v.msg) + default: + return nil, v + } + } + + return DecodeVolume(model) +} + +func (o *OpenAPI) EvictReplica(ctx context.Context, namespaceID string, id string, deploymentID string) error { + o.mu.RLock() + defer o.mu.RUnlock() + + resp, err := o.client.DefaultApi.EvictReplica( + ctx, + namespaceID, + id, + deploymentID, + ) + + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return NewVolumeNotFoundError(id) + default: + return v + } + } + + return nil +} + +func (o *OpenAPI) AttemptPromotion(ctx context.Context, namespaceID string, id string, deploymentID string) error { + o.mu.RLock() + defer o.mu.RUnlock() + + resp, err := o.client.DefaultApi.AttemptPromotion( + ctx, + namespaceID, + id, + deploymentID, + ) + + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return NewVolumeNotFoundError(id) + default: + return v + } + } + + return nil +} + +func (o *OpenAPI) AddDeploymentOnNode(ctx context.Context, namespaceID string, volID string, params *AddDeploymentOnNodeRequestParams) error { + o.mu.RLock() + defer o.mu.RUnlock() + + // default + request := openapi.AddDeploymentOnNodeData{} + opts := &openapi.AddDeploymentOnNodeOpts{ + IgnoreVersion: optional.NewBool(true), + } + + // check optional params + if params != 
nil { + request.NodeID = params.NodeID + + if params.CASVersion != "" { + request.Version = params.CASVersion + opts.IgnoreVersion = optional.NewBool(false) + } + } + + _, resp, err := o.client.DefaultApi.AddDeploymentOnNode(ctx, namespaceID, volID, request, opts) + + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return NewVolumeNotFoundError(volID) + default: + return v + } + } + + return nil +} + +func (o *OpenAPI) SetPreferredEvictionCandidates(ctx context.Context, namespaceID string, volID string, params *SetPreferredEvictionCandidatesRequestParams) error { + o.mu.RLock() + defer o.mu.RUnlock() + + // default + request := openapi.SetPreferredEvictionCandidatesData{} + opts := &openapi.SetPreferredEvictionCandidatesOpts{ + IgnoreVersion: optional.NewBool(true), + } + + // check optional params + if params != nil { + request.DeploymentIDs = params.DeploymentIDs + + if params.CASVersion != "" { + request.Version = params.CASVersion + opts.IgnoreVersion = optional.NewBool(false) + } + } + + _, resp, err := o.client.DefaultApi.SetPreferredEvictionCandidates(ctx, namespaceID, volID, request, opts) + + if err != nil { + switch v := mapOpenAPIError(err, resp).(type) { + case notFoundError: + return NewVolumeNotFoundError(volID) + default: + return v + } + } + + return nil +} diff --git a/api_client/policygroup.go b/api_client/policygroup.go new file mode 100644 index 00000000..b0f65cae --- /dev/null +++ b/api_client/policygroup.go @@ -0,0 +1,113 @@ +package apiclient + +import ( + "context" + + "github.com/storageos/kubectl-storageos/api_client/openapi" + "github.com/storageos/kubectl-storageos/model" +) + +// GetListPolicyGroupsByUID requests a list containing basic information on each +// policy group configured for the cluster. +// +// The returned list is filtered using gids so that it contains only those +// resources which have a matching GID. Omitting gids will skip the filtering. +func (c *client) GetListPolicyGroupsByUID(ctx context.Context, gids ...string) ([]*model.PolicyGroup, error) { + policyGroups, err := c.ListPolicyGroups(ctx) + if err != nil { + return nil, err + } + + return filterPolicyGroupsForIDs(policyGroups, gids...) +} + +// GetPolicyGroupByName requests a policy group given its name +func (c *client) GetPolicyGroupByName(ctx context.Context, name string) (*model.PolicyGroup, error) { + policyGroups, err := c.ListPolicyGroups(ctx) + if err != nil { + return nil, err + } + + for _, p := range policyGroups { + if p.Name == name { + return p, nil + } + } + + return nil, openapi.NewPolicyGroupNameNotFoundError(name) +} + +// GetListPolicyGroupsByName requests a list containing basic information on each +// policy group configured for the cluster. +// +// The returned list is filtered using name so that it contains only those +// resources which have a matching name. Omitting gids will skip the filtering. +func (c *client) GetListPolicyGroupsByName(ctx context.Context, names ...string) ([]*model.PolicyGroup, error) { + policyGroups, err := c.ListPolicyGroups(ctx) + if err != nil { + return nil, err + } + + return filterPolicyGroupsForNames(policyGroups, names...) +} + +// filterPolicyGroupsForNames will return a subset of policyGroups containing +// resources which have one of the provided names. If names is not provided, +// policyGroups is returned as is. +// +// If there is no resource for a given name then an error is returned, thus +// this is a strict helper. 
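A sketch of the strict filtering behaviour from the caller's side: asking for any policy group name that does not exist fails the whole call rather than returning a partial list. The listGroups helper is illustrative only.

package example

import (
	"context"
	"fmt"

	apiclient "github.com/storageos/kubectl-storageos/api_client"
)

// listGroups prints the ID and name of each requested policy group. If any
// requested name is unknown, the call fails with a policy-group-not-found
// error instead of returning the groups that were found.
func listGroups(ctx context.Context, c *apiclient.ClientWithReauth, names ...string) error {
	groups, err := c.GetListPolicyGroupsByName(ctx, names...)
	if err != nil {
		return err
	}

	for _, g := range groups {
		fmt.Println(g.ID, g.Name)
	}
	return nil
}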
+func filterPolicyGroupsForNames(policyGroups []*model.PolicyGroup, names ...string) ([]*model.PolicyGroup, error) { + // return everything if no filter names given + if len(names) == 0 { + return policyGroups, nil + } + + // implicitly removes also duplicates, if any, in the names list + retrieved := map[string]*model.PolicyGroup{} + for _, g := range policyGroups { + retrieved[g.Name] = g + } + + filtered := make([]*model.PolicyGroup, 0, len(names)) + + for _, name := range names { + g, ok := retrieved[name] + if !ok { + return nil, openapi.NewPolicyGroupNameNotFoundError(name) + } + filtered = append(filtered, g) + } + + return filtered, nil +} + +// filterPolicyGroupsForIDs will return a subset of policyGroups containing +// resources which have one of the provided gids. If gids is not provided, +// policyGroups is returned as is. +// +// If there is no resource for a given gid then an error is returned, thus +// this is a strict helper. +func filterPolicyGroupsForIDs(policyGroups []*model.PolicyGroup, gids ...string) ([]*model.PolicyGroup, error) { + if len(gids) == 0 { + return policyGroups, nil + } + + // implicitly removes also duplicates, if any, in the gids list + retrieved := map[string]*model.PolicyGroup{} + for _, g := range policyGroups { + retrieved[g.ID] = g + } + + filtered := make([]*model.PolicyGroup, 0, len(gids)) + + for _, gid := range gids { + g, ok := retrieved[gid] + if !ok { + return nil, openapi.NewPolicyGroupIDNotFoundError(gid) + } + filtered = append(filtered, g) + } + + return filtered, nil +} diff --git a/api_client/reauth_client.go b/api_client/reauth_client.go new file mode 100644 index 00000000..0fb5ad27 --- /dev/null +++ b/api_client/reauth_client.go @@ -0,0 +1,486 @@ +package apiclient + +import ( + "context" + "errors" + "io" + "time" + + "github.com/storageos/kubectl-storageos/api_client/openapi" + "github.com/storageos/kubectl-storageos/model" +) + +// ClientWithReauth wraps calls to the internal api client with a +// re-authenticate and retry mechanism when an authentication error is +// encountered. +type ClientWithReauth struct { + client + + username, password string +} + +// doWithReauth invokes fn, checking the resultant error. +// +// - If the error is an *AuthenticationError then tr's credentials are +// used to reauthenticate before returning the result from re-invoking fn. +// If any errors occur during reauthentication, they are returned. +// - Otherwise, the original error is returned to the caller. +func (tr *ClientWithReauth) doWithReauth(ctx context.Context, fn func() error) error { + originalErr := fn() + + // If the returned error from fn indicates authentication failure then + // fetch credentials from the provider, reauthenticate and try the request + // one more time. + // + // This will reliably catch a cached auth session expiring. + if errors.As(originalErr, &openapi.AuthenticationError{}) { + // Attempt to reauth with credentials from the provider. + _, _, err := tr.Authenticate(ctx, tr.username, tr.password) + if err != nil { + return err + } + + return fn() + } + + return originalErr +} + +// NewClientWithReauth wraps calls to transport with a retry on authentication +// failure, sourcing username and password from credentials. +func NewClientWithReauth(client client, username, password string) *ClientWithReauth { + return &ClientWithReauth{ + client: client, + username: username, + password: password, + } +} + +// Authenticate is passed through and does not try to reauth. 
An authentication +// error here cannot be due to a session timeout. +func (tr *ClientWithReauth) Authenticate(ctx context.Context, username, password string) (string, time.Time, error) { + return tr.client.Authenticate(ctx, username, password) +} + +func (tr *ClientWithReauth) UseAuthSession(ctx context.Context, token string) error { + return tr.client.UseAuthSession(ctx, token) +} + +func (tr *ClientWithReauth) GetUser(ctx context.Context, uid string) (*model.User, error) { + var resource *model.User + err := tr.doWithReauth(ctx, func() error { + var err error + resource, err = tr.client.GetUser(ctx, uid) + + return err + }) + + return resource, err +} + +func (tr *ClientWithReauth) GetCluster(ctx context.Context) (*model.Cluster, error) { + var resource *model.Cluster + err := tr.doWithReauth(ctx, func() error { + var err error + resource, err = tr.client.GetCluster(ctx) + + return err + }) + + return resource, err +} + +func (tr *ClientWithReauth) GetLicence(ctx context.Context) (*model.License, error) { + var resource *model.License + err := tr.doWithReauth(ctx, func() error { + var err error + resource, err = tr.client.GetLicence(ctx) + + return err + }) + + return resource, err +} + +func (tr *ClientWithReauth) GetNode(ctx context.Context, nodeID string) (*model.Node, error) { + var resource *model.Node + err := tr.doWithReauth(ctx, func() error { + var err error + resource, err = tr.client.GetNode(ctx, nodeID) + + return err + }) + + return resource, err +} + +func (tr *ClientWithReauth) GetVolume(ctx context.Context, namespaceID string, volumeID string) (*model.Volume, error) { + var resource *model.Volume + err := tr.doWithReauth(ctx, func() error { + var err error + resource, err = tr.client.GetVolume(ctx, namespaceID, volumeID) + + return err + }) + + return resource, err +} + +func (tr *ClientWithReauth) GetNamespace(ctx context.Context, namespaceID string) (*model.Namespace, error) { + var resource *model.Namespace + err := tr.doWithReauth(ctx, func() error { + var err error + resource, err = tr.client.GetNamespace(ctx, namespaceID) + + return err + }) + + return resource, err +} + +func (tr *ClientWithReauth) GetDiagnostics(ctx context.Context) (io.ReadCloser, string, error) { + var diagnosticsReadCloser io.ReadCloser + var diagnosticsName string + err := tr.doWithReauth(ctx, func() error { + var err error + diagnosticsReadCloser, diagnosticsName, err = tr.client.GetDiagnostics(ctx) + + return err + }) + + return diagnosticsReadCloser, diagnosticsName, err +} + +func (tr *ClientWithReauth) GetSingleNodeDiagnostics(ctx context.Context, nodeID string) (io.ReadCloser, string, error) { + var diagnosticsReadCloser io.ReadCloser + var diagnosticsName string + err := tr.doWithReauth(ctx, func() error { + var err error + diagnosticsReadCloser, diagnosticsName, err = tr.client.GetSingleNodeDiagnostics(ctx, nodeID) + + return err + }) + + return diagnosticsReadCloser, diagnosticsName, err +} + +func (tr *ClientWithReauth) GetPolicyGroup(ctx context.Context, uid string) (*model.PolicyGroup, error) { + var resource *model.PolicyGroup + err := tr.doWithReauth(ctx, func() error { + var err error + resource, err = tr.client.GetPolicyGroup(ctx, uid) + + return err + }) + + return resource, err +} + +func (tr *ClientWithReauth) ListNodes(ctx context.Context) ([]*model.Node, error) { + var resources []*model.Node + err := tr.doWithReauth(ctx, func() error { + var err error + resources, err = tr.client.ListNodes(ctx) + + return err + }) + + return resources, err +} + +func (tr 
*ClientWithReauth) ListVolumes(ctx context.Context, namespaceID string) ([]*model.Volume, error) { + var resources []*model.Volume + err := tr.doWithReauth(ctx, func() error { + var err error + resources, err = tr.client.ListVolumes(ctx, namespaceID) + + return err + }) + + return resources, err +} + +func (tr *ClientWithReauth) ListNamespaces(ctx context.Context) ([]*model.Namespace, error) { + var resources []*model.Namespace + err := tr.doWithReauth(ctx, func() error { + var err error + resources, err = tr.client.ListNamespaces(ctx) + + return err + }) + + return resources, err +} + +func (tr *ClientWithReauth) ListPolicyGroups(ctx context.Context) ([]*model.PolicyGroup, error) { + var resources []*model.PolicyGroup + err := tr.doWithReauth(ctx, func() error { + var err error + resources, err = tr.client.ListPolicyGroups(ctx) + + return err + }) + + return resources, err +} + +func (tr *ClientWithReauth) ListUsers(ctx context.Context) ([]*model.User, error) { + var resources []*model.User + err := tr.doWithReauth(ctx, func() error { + var err error + resources, err = tr.client.ListUsers(ctx) + + return err + }) + + return resources, err +} + +func (tr *ClientWithReauth) CreateUser(ctx context.Context, username, password string, withAdmin bool, groups ...string) (*model.User, error) { + var resource *model.User + err := tr.doWithReauth(ctx, func() error { + var err error + resource, err = tr.client.CreateUser(ctx, username, password, withAdmin, groups...) + + return err + }) + + return resource, err +} + +func (tr *ClientWithReauth) CreateVolume(ctx context.Context, namespaceID string, name, description string, fs model.FsType, sizeBytes uint64, labels map[string]string, params *openapi.CreateVolumeRequestParams) (*model.Volume, error) { + var resource *model.Volume + err := tr.doWithReauth(ctx, func() error { + var err error + resource, err = tr.client.CreateVolume( + ctx, + namespaceID, + name, + description, + fs, + sizeBytes, + labels, + params, + ) + + return err + }) + + return resource, err +} + +func (tr *ClientWithReauth) CreatePolicyGroup(ctx context.Context, name string, specs []*model.PolicyGroupSpec) (*model.PolicyGroup, error) { + var resource *model.PolicyGroup + err := tr.doWithReauth(ctx, func() error { + var err error + resource, err = tr.client.CreatePolicyGroup(ctx, name, specs) + + return err + }) + + return resource, err +} + +func (tr *ClientWithReauth) CreateNamespace(ctx context.Context, name string, labels map[string]string) (*model.Namespace, error) { + var resource *model.Namespace + err := tr.doWithReauth(ctx, func() error { + var err error + resource, err = tr.client.CreateNamespace(ctx, name, labels) + + return err + }) + + return resource, err +} + +func (tr *ClientWithReauth) UpdateCluster(ctx context.Context, c *model.Cluster, params *openapi.UpdateClusterRequestParams) (*model.Cluster, error) { + var updated *model.Cluster + err := tr.doWithReauth(ctx, func() error { + var err error + updated, err = tr.client.UpdateCluster(ctx, c, params) + + return err + }) + + return updated, err +} + +func (tr *ClientWithReauth) UpdateLicence(ctx context.Context, lic []byte, params *openapi.UpdateLicenceRequestParams) (*model.License, error) { + var updated *model.License + err := tr.doWithReauth(ctx, func() error { + var err error + updated, err = tr.client.UpdateLicence(ctx, lic, params) + + return err + }) + + return updated, err +} + +func (tr *ClientWithReauth) DeleteNode(ctx context.Context, nodeID string, params *openapi.DeleteNodeRequestParams) error { + err 
:= tr.doWithReauth(ctx, func() error { + return tr.client.DeleteNode(ctx, nodeID, params) + }) + + return err +} + +func (tr *ClientWithReauth) SetReplicas(ctx context.Context, nsID string, volID string, numReplicas uint64, params *openapi.SetReplicasRequestParams) error { + err := tr.doWithReauth(ctx, func() error { + return tr.client.SetReplicas(ctx, nsID, volID, numReplicas, params) + }) + + return err +} + +func (tr *ClientWithReauth) UpdateVolume(ctx context.Context, nsID string, volID string, description string, labels map[string]string, params *openapi.UpdateVolumeRequestParams) (*model.Volume, error) { + var updated *model.Volume + err := tr.doWithReauth(ctx, func() error { + var err error + updated, err = tr.client.UpdateVolume(ctx, nsID, volID, description, labels, params) + return err + }) + + return updated, err +} + +func (tr *ClientWithReauth) ResizeVolume(ctx context.Context, nsID string, volID string, sizeBytes uint64, params *openapi.ResizeVolumeRequestParams) (*model.Volume, error) { + var updated *model.Volume + err := tr.doWithReauth(ctx, func() error { + var err error + updated, err = tr.client.ResizeVolume(ctx, nsID, volID, sizeBytes, params) + return err + }) + + return updated, err +} + +func (tr *ClientWithReauth) UpdateNFSVolumeExports(ctx context.Context, namespaceID string, volumeID string, exports []model.NFSExportConfig, params *openapi.UpdateNFSVolumeExportsRequestParams) error { + err := tr.doWithReauth(ctx, func() error { + return tr.client.UpdateNFSVolumeExports(ctx, namespaceID, volumeID, exports, params) + }) + + return err +} + +func (tr *ClientWithReauth) UpdateNFSVolumeMountEndpoint(ctx context.Context, namespaceID string, volumeID string, endpoint string, params *openapi.UpdateNFSVolumeMountEndpointRequestParams) error { + err := tr.doWithReauth(ctx, func() error { + return tr.client.UpdateNFSVolumeMountEndpoint(ctx, namespaceID, volumeID, endpoint, params) + }) + + return err +} + +func (tr *ClientWithReauth) SetFailureModeIntent(ctx context.Context, nsID string, volID string, intent string, params *openapi.SetFailureModeRequestParams) (*model.Volume, error) { + var updated *model.Volume + err := tr.doWithReauth(ctx, func() error { + var err error + updated, err = tr.client.SetFailureModeIntent(ctx, nsID, volID, intent, params) + return err + }) + + return updated, err +} + +func (tr *ClientWithReauth) SetFailureThreshold(ctx context.Context, nsID string, volID string, intent uint64, params *openapi.SetFailureModeRequestParams) (*model.Volume, error) { + var updated *model.Volume + err := tr.doWithReauth(ctx, func() error { + var err error + updated, err = tr.client.SetFailureThreshold(ctx, nsID, volID, intent, params) + return err + }) + + return updated, err +} + +func (tr *ClientWithReauth) DeleteVolume(ctx context.Context, namespaceID string, volumeID string, params *openapi.DeleteVolumeRequestParams) error { + err := tr.doWithReauth(ctx, func() error { + return tr.client.DeleteVolume(ctx, namespaceID, volumeID, params) + }) + + return err +} + +func (tr *ClientWithReauth) DeleteNamespace(ctx context.Context, uid string, params *openapi.DeleteNamespaceRequestParams) error { + err := tr.doWithReauth(ctx, func() error { + return tr.client.DeleteNamespace(ctx, uid, params) + }) + + return err +} + +func (tr *ClientWithReauth) DeleteUser(ctx context.Context, uid string, params *openapi.DeleteUserRequestParams) error { + err := tr.doWithReauth(ctx, func() error { + return tr.client.DeleteUser(ctx, uid, params) + }) + + return err +} + +func (tr 
*ClientWithReauth) DeletePolicyGroup(ctx context.Context, uid string, params *openapi.DeletePolicyGroupRequestParams) error { + err := tr.doWithReauth(ctx, func() error { + return tr.client.DeletePolicyGroup(ctx, uid, params) + }) + + return err +} + +func (tr *ClientWithReauth) AttachVolume(ctx context.Context, namespaceID string, volumeID string, nodeID string) error { + err := tr.doWithReauth(ctx, func() error { + return tr.client.AttachVolume(ctx, namespaceID, volumeID, nodeID) + }) + + return err +} + +func (tr *ClientWithReauth) AttachNFSVolume(ctx context.Context, namespaceID string, volumeID string, params *openapi.AttachNFSVolumeRequestParams) error { + err := tr.doWithReauth(ctx, func() error { + return tr.client.AttachNFSVolume(ctx, namespaceID, volumeID, params) + }) + + return err +} + +func (tr *ClientWithReauth) SetCordoned(ctx context.Context, nodeID string, params *openapi.SetCordonedRequestParams) error { + err := tr.doWithReauth(ctx, func() error { + return tr.client.SetCordoned(ctx, nodeID, params) + }) + + return err +} + +func (tr *ClientWithReauth) EvictReplica(ctx context.Context, namespaceID string, id string, deploymentID string) error { + return tr.doWithReauth(ctx, func() error { + return tr.client.EvictReplica(ctx, namespaceID, id, deploymentID) + }) +} + +func (tr *ClientWithReauth) AttemptPromotion(ctx context.Context, namespaceID string, id string, deploymentID string) error { + return tr.doWithReauth(ctx, func() error { + return tr.client.AttemptPromotion(ctx, namespaceID, id, deploymentID) + }) +} + +func (tr *ClientWithReauth) DetachVolume(ctx context.Context, namespaceID string, volumeID string, params *openapi.DetachVolumeRequestParams) error { + err := tr.doWithReauth(ctx, func() error { + return tr.client.DetachVolume(ctx, namespaceID, volumeID, params) + }) + + return err +} + +func (tr *ClientWithReauth) AddDeploymentOnNode(ctx context.Context, namespaceID string, volumeID string, params *openapi.AddDeploymentOnNodeRequestParams) error { + err := tr.doWithReauth(ctx, func() error { + return tr.client.AddDeploymentOnNode(ctx, namespaceID, volumeID, params) + }) + + return err +} + +func (tr *ClientWithReauth) SetPreferredEvictionCandidates(ctx context.Context, namespaceID string, volID string, params *openapi.SetPreferredEvictionCandidatesRequestParams) error { + err := tr.doWithReauth(ctx, func() error { + return tr.client.SetPreferredEvictionCandidates(ctx, namespaceID, volID, params) + }) + + return err +} diff --git a/api_client/user.go b/api_client/user.go new file mode 100644 index 00000000..2a2e6823 --- /dev/null +++ b/api_client/user.go @@ -0,0 +1,69 @@ +package apiclient + +import ( + "context" + + "github.com/storageos/kubectl-storageos/api_client/openapi" + "github.com/storageos/kubectl-storageos/model" +) + +func (c *client) GetUserByName(ctx context.Context, username string) (*model.User, error) { + list, err := c.ListUsers(ctx) + if err != nil { + return nil, err + } + + for _, u := range list { + if u.Username == username { + return u, nil + } + } + + return nil, openapi.NewUserNameNotFoundError(username) +} + +func (c *client) GetListUsersByUID(ctx context.Context, uids []string) ([]*model.User, error) { + list, err := c.ListUsers(ctx) + if err != nil { + return nil, err + } + + toMap := make(map[string]*model.User) + for _, u := range list { + toMap[u.ID] = u + } + + filtered := make([]*model.User, 0) + for _, idVar := range uids { + u, ok := toMap[idVar] + if !ok { + return nil, openapi.NewUserNotFoundError("user not found", idVar) 
+ } + filtered = append(filtered, u) + } + + return filtered, nil +} + +func (c *client) GetListUsersByUsername(ctx context.Context, usernames []string) ([]*model.User, error) { + list, err := c.ListUsers(ctx) + if err != nil { + return nil, err + } + + toMap := make(map[string]*model.User) + for _, u := range list { + toMap[u.Username] = u + } + + filtered := make([]*model.User, 0) + for _, username := range usernames { + u, ok := toMap[username] + if !ok { + return nil, openapi.NewUserNameNotFoundError(username) + } + filtered = append(filtered, u) + } + + return filtered, nil +} diff --git a/api_client/volume.go b/api_client/volume.go new file mode 100644 index 00000000..77242e61 --- /dev/null +++ b/api_client/volume.go @@ -0,0 +1,490 @@ +package apiclient + +import ( + "context" + "errors" + + "golang.org/x/sync/errgroup" + + "github.com/storageos/kubectl-storageos/api_client/openapi" + "github.com/storageos/kubectl-storageos/model" +) + +// GetVolumeByName requests the volume resource which has name in namespace. +// +// The resource model for the API is build around using unique identifiers, +// so this operation is inherently more expensive than the corresponding +// GetVolume() operation. +// +// Retrieving a volume resource by name involves requesting a list of all +// volumes in the namespace from the StorageOS API and returning the first one +// where the name matches. +func (c *client) GetVolumeByName(ctx context.Context, namespaceID string, name string) (*model.Volume, error) { + volumes, err := c.ListVolumes(ctx, namespaceID) + if err != nil { + return nil, err + } + + for _, v := range volumes { + if v.Name == name { + return v, nil + } + } + + return nil, openapi.NewVolumeNameNotFoundError(name) +} + +// GetNamespaceVolumesByUID requests basic information for each volume resource in +// namespace from the StorageOS API. +// +// The returned list is filtered using uids so that it contains only those +// resources which have a matching ID. Omitting uids will skip the filtering. +func (c *client) GetNamespaceVolumesByUID(ctx context.Context, namespaceID string, uids ...string) ([]*model.Volume, error) { + volumes, err := c.ListVolumes(ctx, namespaceID) + if err != nil { + return nil, err + } + + return filterVolumesForUIDs(volumes, uids...) +} + +// GetNamespaceVolumesByName requests basic information for each volume resource in +// namespace from the StorageOS API. +// +// The returned list is filtered using names so that it contains only those +// resources which have a matching name. Omitting names will skip the filtering. +func (c *client) GetNamespaceVolumesByName(ctx context.Context, namespaceID string, names ...string) ([]*model.Volume, error) { + volumes, err := c.ListVolumes(ctx, namespaceID) + if err != nil { + return nil, err + } + + return filterVolumesForNames(volumes, names...) +} + +// GetAllVolumes requests basic information for each volume resource in every +// namespace exposed by the StorageOS API to the authenticated user. +func (c *client) GetAllVolumes(ctx context.Context) ([]*model.Volume, error) { + return c.fetchAllVolumesParallel(ctx) +} + +// fetchAllVolumesParallel requests the list of all namespaces from the +// StorageOS API, then requests the list of volumes within each namespace, +// calling all of them in parallel, returning an aggregate list of the volumes +// returned. +// +// If access is not granted when listing volumes for a retrieved namespace it +// is noted but will not return an error. 
Volumes in namespaces the +// user cannot access are simply omitted from the aggregate result. +// +// If any of the calls returns an error: +// - the context is canceled so all pending requests are cut +// - this method returns an error +func (c *client) fetchAllVolumesParallel(ctx context.Context) ([]*model.Volume, error) { + namespaces, err := c.ListNamespaces(ctx) + if err != nil { + return nil, err + } + + // The derived Context is canceled the first time a function passed to Go + // returns a non-nil error or the first time Wait returns, whichever occurs + // first. + group, ctx := errgroup.WithContext(ctx) + + results := make(chan []*model.Volume, len(namespaces)) + + for _, ns := range namespaces { + ns := ns + + // Go calls the given function in a new goroutine. + // + // The first call to return a non-nil error cancels the group; its error + // will be returned by Wait. + group.Go(func() error { + nsvols, err := c.ListVolumes(ctx, ns.ID) + switch { + case err == nil, errors.As(err, &openapi.UnauthorisedError{}): + // For an unauthorised error, ignore - it's not fatal to the operation. + default: + return err + } + + results <- nsvols + return nil + }) + } + + // blocks until all function calls from the Go method have returned + if err := group.Wait(); err != nil { + return nil, err + } + + close(results) + + // merge the results + volumes := []*model.Volume{} + for r := range results { + volumes = append(volumes, r...) + } + + return volumes, nil +} + +// filterVolumesForUIDs will return a subset of volumes containing resources +// which have one of the provided uids. If uids is not provided, volumes is +// returned as is. +// +// If there is no resource for a given uid then an error is returned, thus +// this is a strict helper. +func filterVolumesForUIDs(volumes []*model.Volume, uids ...string) ([]*model.Volume, error) { + if len(uids) == 0 { + return volumes, nil + } + + retrieved := map[string]*model.Volume{} + + for _, v := range volumes { + retrieved[v.ID] = v + } + + filtered := make([]*model.Volume, 0, len(uids)) + + for _, idVar := range uids { + v, ok := retrieved[idVar] + if !ok { + return nil, openapi.NewVolumeIDNotFoundError(idVar) + } + filtered = append(filtered, v) + } + + return filtered, nil +} + +// filterVolumesForNames will return a subset of volumes containing resources +// which have one of the provided names. If names is not provided, volumes is +// returned as is. +// +// If there is no resource for a given name then an error is returned, thus +// this is a strict helper. +func filterVolumesForNames(volumes []*model.Volume, names ...string) ([]*model.Volume, error) { + if len(names) == 0 { + return volumes, nil + } + + retrieved := map[string]*model.Volume{} + + for _, v := range volumes { + retrieved[v.Name] = v + } + + filtered := make([]*model.Volume, 0, len(names)) + + for _, name := range names { + v, ok := retrieved[name] + if !ok { + return nil, openapi.NewVolumeNameNotFoundError(name) + } + filtered = append(filtered, v) + } + + return filtered, nil +}
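The fan-out in fetchAllVolumesParallel above is the standard errgroup pattern: one goroutine per namespace, a results channel buffered to the namespace count so senders never block, and a derived context that cancels outstanding requests on the first fatal error. A minimal, self-contained sketch of the same shape; fetchAll, fetch and the string slices are placeholders, not part of this change:

package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// fetchAll fans out one goroutine per key and merges the partial results.
func fetchAll(ctx context.Context, keys []string, fetch func(context.Context, string) ([]string, error)) ([]string, error) {
	group, ctx := errgroup.WithContext(ctx)
	results := make(chan []string, len(keys)) // buffered so goroutines never block on send

	for _, k := range keys {
		k := k // capture the loop variable for the closure
		group.Go(func() error {
			items, err := fetch(ctx, k)
			if err != nil {
				return err // first error cancels ctx and is returned by Wait
			}
			results <- items
			return nil
		})
	}

	if err := group.Wait(); err != nil {
		return nil, err
	}
	close(results)

	merged := []string{}
	for r := range results {
		merged = append(merged, r...)
	}
	return merged, nil
}

func main() {
	out, err := fetchAll(context.Background(), []string{"ns-a", "ns-b"}, func(ctx context.Context, k string) ([]string, error) {
		return []string{k + "/vol-1"}, nil
	})
	fmt.Println(out, err)
}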
+// SetReplicas sends a new replica count for the selected volume. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the set replicas request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +func (c *client) SetReplicas(ctx context.Context, nsID string, volID string, numReplicas uint64, params *openapi.SetReplicasRequestParams) error { + if params == nil || params.CASVersion == "" { + v, err := c.GetVolume(ctx, nsID, volID) + if err != nil { + return err + } + + params = &openapi.SetReplicasRequestParams{CASVersion: v.Version} + } + + // delegate to the embedded openAPI client rather than this wrapper to avoid infinite recursion + return c.OpenAPI.SetReplicas(ctx, nsID, volID, numReplicas, params) +} + +// UpdateVolumeDescription sends a new description for updating the selected volume. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +// +// Asynchrony: +// - If params is nil or params.AsyncMax is empty/zero valued then the update +// request is performed synchronously. +// - If params.AsyncMax is set, the request is performed asynchronously using +// the duration given as the maximum amount of time allowed for the request +// before it times out. +func (c *client) UpdateVolumeDescription(ctx context.Context, nsID string, volID string, description string, params *openapi.UpdateVolumeRequestParams) (*model.Volume, error) { + v, err := c.GetVolume(ctx, nsID, volID) + if err != nil { + return nil, err + } + + newParams := &openapi.UpdateVolumeRequestParams{} + + if params != nil { + if params.CASVersion != "" { + newParams.CASVersion = params.CASVersion + } + if params.AsyncMax != 0 { + newParams.AsyncMax = params.AsyncMax + } + } + + if newParams.CASVersion == "" { + newParams.CASVersion = v.Version + } + + return c.UpdateVolume(ctx, nsID, volID, description, v.Labels, newParams) +} + +// UpdateVolumeLabels sends a new set of labels for updating the selected volume +// labels. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +// +// Asynchrony: +// - If params is nil or params.AsyncMax is empty/zero valued then the update +// request is performed synchronously. +// - If params.AsyncMax is set, the request is performed asynchronously using +// the duration given as the maximum amount of time allowed for the request +// before it times out. +func (c *client) UpdateVolumeLabels(ctx context.Context, nsID string, volID string, labels map[string]string, params *openapi.UpdateVolumeRequestParams) (*model.Volume, error) { + v, err := c.GetVolume(ctx, nsID, volID) + if err != nil { + return nil, err + } + + newParams := &openapi.UpdateVolumeRequestParams{} + + if params != nil { + if params.CASVersion != "" { + newParams.CASVersion = params.CASVersion + } + if params.AsyncMax != 0 { + newParams.AsyncMax = params.AsyncMax + } + } + + if newParams.CASVersion == "" { + newParams.CASVersion = v.Version + } + + return c.UpdateVolume(ctx, nsID, volID, v.Description, labels, newParams) +}
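The volume mutators in this file all repeat the same compare-and-set defaulting step: if the caller did not supply a CASVersion, the current volume is read and its version used, so every write is still conditional on the state that was just observed. Stripped of the StorageOS types, the pattern reduces to roughly this (applyWithCAS and its function arguments are illustrative only, assuming a "context" import):

// applyWithCAS shows the defaulting step in isolation: use the caller's
// version if given, otherwise read the current one before writing.
func applyWithCAS(ctx context.Context, casVersion string, getVersion func(context.Context) (string, error), update func(context.Context, string) error) error {
	if casVersion == "" {
		current, err := getVersion(ctx)
		if err != nil {
			return err
		}
		casVersion = current
	}
	return update(ctx, casVersion)
}

The read-then-write is not atomic, but a concurrent change bumps the server-side version, so a stale write is rejected by the server instead of silently applied, which is the point of the CAS parameter.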
+// ResizeVolume sends a new volume size for updating the selected volume. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the resize request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +func (c *client) ResizeVolume(ctx context.Context, nsID string, volID string, sizeBytes uint64, params *openapi.ResizeVolumeRequestParams) (*model.Volume, error) { + newParams := &openapi.ResizeVolumeRequestParams{} + + if params == nil || params.CASVersion == "" { + v, err := c.GetVolume(ctx, nsID, volID) + if err != nil { + return nil, err + } + newParams.CASVersion = v.Version + } else { + newParams.CASVersion = params.CASVersion + } + + if params != nil && params.AsyncMax != 0 { + newParams.AsyncMax = params.AsyncMax + } + + return c.OpenAPI.ResizeVolume(ctx, nsID, volID, sizeBytes, newParams) +} + +// AttachNFSVolume sends a new attach volume request for NFS. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the attach request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +func (c *client) AttachNFSVolume(ctx context.Context, namespaceID string, volumeID string, params *openapi.AttachNFSVolumeRequestParams) error { + newParams := &openapi.AttachNFSVolumeRequestParams{} + + if params == nil || params.CASVersion == "" { + v, err := c.GetVolume(ctx, namespaceID, volumeID) + if err != nil { + return err + } + newParams.CASVersion = v.Version + } else { + newParams.CASVersion = params.CASVersion + } + + if params != nil && params.AsyncMax != 0 { + newParams.AsyncMax = params.AsyncMax + } + + return c.OpenAPI.AttachNFSVolume(ctx, namespaceID, volumeID, newParams) +} + +// UpdateNFSVolumeExports sends a new NFS volume export update request. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the update request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +func (c *client) UpdateNFSVolumeExports( + ctx context.Context, + namespaceID string, + volumeID string, + exports []model.NFSExportConfig, + params *openapi.UpdateNFSVolumeExportsRequestParams, +) error { + newParams := &openapi.UpdateNFSVolumeExportsRequestParams{} + + if params == nil || params.CASVersion == "" { + v, err := c.GetVolume(ctx, namespaceID, volumeID) + if err != nil { + return err + } + newParams.CASVersion = v.Version + } else { + newParams.CASVersion = params.CASVersion + } + + if params != nil && params.AsyncMax != 0 { + newParams.AsyncMax = params.AsyncMax + } + + return c.OpenAPI.UpdateNFSVolumeExports(ctx, namespaceID, volumeID, exports, newParams) +}
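For UpdateNFSVolumeExports above, the caller supplies the full desired export list, and passing nil params again falls back to the volume's current version. A hypothetical caller could look like the following; the ACL string values ("cidr", "rw", "none") and addresses are illustrative only and need to match whatever the StorageOS API actually accepts:

func exposeToCluster(ctx context.Context, c *client, nsID, volID string) error {
	exports := []model.NFSExportConfig{{
		ExportID:   1,
		Path:       "/",
		PseudoPath: "/",
		ACLs: []model.NFSExportConfigACL{{
			Identity:     model.NFSExportConfigACLIdentity{IdentityType: "cidr", Matcher: "10.0.0.0/8"},
			SquashConfig: model.NFSExportConfigACLSquashConfig{UID: 0, GID: 0, Squash: "none"},
			AccessLevel:  "rw",
		}},
	}}

	// nil params: the helper reads the volume and applies its current version.
	return c.UpdateNFSVolumeExports(ctx, nsID, volID, exports, nil)
}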
+// UpdateNFSVolumeMountEndpoint sends a new NFS volume mount endpoint update +// request. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the update request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +func (c *client) UpdateNFSVolumeMountEndpoint( + ctx context.Context, + namespaceID string, + volumeID string, + endpoint string, + params *openapi.UpdateNFSVolumeMountEndpointRequestParams, +) error { + newParams := &openapi.UpdateNFSVolumeMountEndpointRequestParams{} + + if params == nil || params.CASVersion == "" { + v, err := c.GetVolume(ctx, namespaceID, volumeID) + if err != nil { + return err + } + newParams.CASVersion = v.Version + } else { + newParams.CASVersion = params.CASVersion + } + + if params != nil && params.AsyncMax != 0 { + newParams.AsyncMax = params.AsyncMax + } + + return c.OpenAPI.UpdateNFSVolumeMountEndpoint(ctx, namespaceID, volumeID, endpoint, newParams) +} + +// SetFailureModeIntent attempts to set the intent-based failure mode for the +// target volume to the behaviour with the provided name. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the set failure mode request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +func (c *client) SetFailureModeIntent( + ctx context.Context, + namespaceID string, + volumeID string, + intent string, + params *openapi.SetFailureModeRequestParams, +) (*model.Volume, error) { + newParams := &openapi.SetFailureModeRequestParams{} + + if params == nil || params.CASVersion == "" { + v, err := c.GetVolume(ctx, namespaceID, volumeID) + if err != nil { + return nil, err + } + newParams.CASVersion = v.Version + } else { + newParams.CASVersion = params.CASVersion + } + + return c.OpenAPI.SetFailureModeIntent(ctx, namespaceID, volumeID, intent, newParams) +}
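A caller can also chain these helpers, taking the version returned by one mutation and pinning the next request to it, so a read-modify-write sequence cannot be silently clobbered by a concurrent writer. A hypothetical sketch using ResizeVolume and SetFailureThreshold below (the 10 GiB size and threshold of 2 are arbitrary):

func resizeThenPin(ctx context.Context, c *client, nsID, volID string) error {
	// Unconditional resize: nil params means the helper reads the current
	// version itself before issuing the request.
	vol, err := c.ResizeVolume(ctx, nsID, volID, 10*1024*1024*1024, nil)
	if err != nil {
		return err
	}

	// Conditional follow-up: pin the version we just observed so a concurrent
	// modification is rejected rather than silently overwritten.
	_, err = c.SetFailureThreshold(ctx, nsID, volID, 2, &openapi.SetFailureModeRequestParams{CASVersion: vol.Version})
	return err
}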
+// SetFailureThreshold attempts to set the failure mode for the target volume +// to the numerical threshold given. +// +// The behaviour of the operation is dictated by params: +// +// Version constraints: +// - If params is nil or params.CASVersion is empty then the set failure threshold request is +// unconditional +// - If params.CASVersion is set, the request is conditional upon it matching +// the volume entity's version as seen by the server. +func (c *client) SetFailureThreshold( + ctx context.Context, + namespaceID string, + volumeID string, + threshold uint64, + params *openapi.SetFailureModeRequestParams, +) (*model.Volume, error) { + newParams := &openapi.SetFailureModeRequestParams{} + + if params == nil || params.CASVersion == "" { + v, err := c.GetVolume(ctx, namespaceID, volumeID) + if err != nil { + return nil, err + } + newParams.CASVersion = v.Version + } else { + newParams.CASVersion = params.CASVersion + } + + return c.OpenAPI.SetFailureThreshold(ctx, namespaceID, volumeID, threshold, newParams) +} diff --git a/cmd/bundle.go b/cmd/bundle.go index 586f67ec..bd34cafd 100644 --- a/cmd/bundle.go +++ b/cmd/bundle.go @@ -26,7 +26,7 @@ func BundleCmd() *cobra.Command { from a server that can be used to assist when troubleshooting a StorageOS cluster.`, SilenceUsage: true, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { v := viper.GetViper() @@ -51,9 +51,9 @@ from a server that can be used to assist when troubleshooting a StorageOS cluste // hidden in favor of the `insecure-skip-tls-verify` flag cmd.Flags().Bool("allow-insecure-connections", false, "when set, do not verify TLS certs when retrieving spec and reporting results") - cmd.Flags().MarkHidden("allow-insecure-connections") + _ = cmd.Flags().MarkHidden("allow-insecure-connections") - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) viper.SetEnvKeyReplacer(strings.NewReplacer("-", "_")) diff --git a/cmd/completion.go b/cmd/completion.go index e225f7af..5b3ff917 100644 --- a/cmd/completion.go +++ b/cmd/completion.go @@ -16,13 +16,13 @@ var CompletionCmd = &cobra.Command{ Run: func(cmd *cobra.Command, args []string) { switch args[0] { case "bash": - cmd.Root().GenBashCompletion(os.Stdout) + _ = cmd.Root().GenBashCompletion(os.Stdout) case "zsh": - cmd.Root().GenZshCompletion(os.Stdout) + _ = cmd.Root().GenZshCompletion(os.Stdout) case "fish": - cmd.Root().GenFishCompletion(os.Stdout, true) + _ = cmd.Root().GenFishCompletion(os.Stdout, true) case "powershell": - cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) + _ = cmd.Root().GenPowerShellCompletionWithDesc(os.Stdout) } }, } diff --git a/cmd/disable-portal.go b/cmd/disable-portal.go index 6ed55159..d033e4b0 100644 --- a/cmd/disable-portal.go +++ b/cmd/disable-portal.go @@ -56,7 +56,7 @@ func DisablePortalCmd() *cobra.Command { cmd.Flags().String(installer.StosConfigPathFlag, "", "path to look for kubectl-storageos-config.yaml") cmd.Flags().String(installer.StosOperatorNSFlag, consts.NewOperatorNamespace, "namespace of storageos operator") - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) return cmd } @@ -84,7 +84,7 @@ func disablePortalCmd(config *apiv1.KubectlStorageOSConfig, log *logger.Logger) } func setDisablePortalValues(cmd *cobra.Command, config *apiv1.KubectlStorageOSConfig) error { - viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) + _ = viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) v := viper.GetViper() viper.SetConfigName("kubectl-storageos-config") viper.SetConfigType("yaml") diff --git a/cmd/enable-portal.go b/cmd/enable-portal.go index 07a6afce..f4bd3249 100644 --- a/cmd/enable-portal.go +++ b/cmd/enable-portal.go @@ -56,7 +56,7 @@ func EnablePortalCmd() *cobra.Command { cmd.Flags().String(installer.StosConfigPathFlag, "", "path to look for 
kubectl-storageos-config.yaml") cmd.Flags().String(installer.StosOperatorNSFlag, consts.NewOperatorNamespace, "namespace of storageos operator") - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) return cmd } @@ -83,7 +83,7 @@ func enablePortalCmd(config *apiv1.KubectlStorageOSConfig, log *logger.Logger) e } func setEnablePortalValues(cmd *cobra.Command, config *apiv1.KubectlStorageOSConfig) error { - viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) + _ = viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) v := viper.GetViper() viper.SetConfigName("kubectl-storageos-config") viper.SetConfigType("yaml") diff --git a/cmd/install-portal.go b/cmd/install-portal.go index cf7f5556..8ce14dff 100644 --- a/cmd/install-portal.go +++ b/cmd/install-portal.go @@ -65,7 +65,7 @@ func InstallPortalCmd() *cobra.Command { cmd.Flags().String(installer.PortalHTTPSProxyFlag, "", "HTTPS proxy of portal manager") cmd.Flags().Bool(installer.AirGapFlag, false, "install portal manger in an air gapped environment") - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) return cmd } @@ -118,7 +118,7 @@ func installPortalCmd(config *apiv1.KubectlStorageOSConfig, log *logger.Logger) } func setInstallPortalValues(cmd *cobra.Command, config *apiv1.KubectlStorageOSConfig) error { - viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) + _ = viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) v := viper.GetViper() viper.SetConfigName("kubectl-storageos-config") viper.SetConfigType("yaml") diff --git a/cmd/install.go b/cmd/install.go index 71cb1ba4..3303aa86 100644 --- a/cmd/install.go +++ b/cmd/install.go @@ -101,9 +101,9 @@ func InstallCmd() *cobra.Command { cmd.Flags().Bool(installer.EnableNodeGuardFlag, false, "enable node guard") cmd.Flags().String(installer.NodeGuardEnvFlag, "", "comma delimited string of environment variables for node guard - eg: \"MINIMUM_REPLICAS=2,WATCH_ALL_VOLUMES=true\"") - cmd.Flags().MarkHidden(installer.TestClusterFlag) + _ = cmd.Flags().MarkHidden(installer.TestClusterFlag) - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) return cmd } @@ -214,7 +214,7 @@ func installCmd(config *apiv1.KubectlStorageOSConfig, log *logger.Logger) error } func setInstallValues(cmd *cobra.Command, config *apiv1.KubectlStorageOSConfig) error { - viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) + _ = viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) v := viper.GetViper() viper.SetConfigName("kubectl-storageos-config") viper.SetConfigType("yaml") diff --git a/cmd/preflight.go b/cmd/preflight.go index 830ee595..d6f1934f 100644 --- a/cmd/preflight.go +++ b/cmd/preflight.go @@ -24,7 +24,7 @@ func PreflightCmd() *cobra.Command { Long: `A preflight check is a set of validations that can and should be run to ensure that a cluster meets the requirements to run StorageOS.`, SilenceUsage: true, PreRun: func(cmd *cobra.Command, args []string) { - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) }, RunE: func(cmd *cobra.Command, args []string) error { v := viper.GetViper() diff --git a/cmd/testapi.go b/cmd/testapi.go new file mode 100644 index 00000000..9310d392 --- /dev/null +++ b/cmd/testapi.go @@ -0,0 +1,80 @@ +package cmd + +import ( + "context" + "fmt" + "time" + + "github.com/spf13/cobra" + 
apiclient "github.com/storageos/kubectl-storageos/api_client" + "github.com/storageos/kubectl-storageos/pkg/utils" +) + +func TestAPICmd() *cobra.Command { + cmd := &cobra.Command{ + Use: "testapi", + Short: "Does a GetVolumes api request to test", + Args: cobra.MinimumNArgs(0), + SilenceUsage: true, + FParseErrWhitelist: cobra.FParseErrWhitelist{UnknownFlags: true}, + DisableFlagParsing: true, + + RunE: func(cmd *cobra.Command, args []string) error { + // Example of usage of the new api client + + cfg, err := utils.NewClientConfig() + if err != nil { + return err + } + // 1 - get client + client, err := apiclient.GetAPIClient(cfg) + if err != nil { + return err + } + + // 2 - authenticate + ctx, cancel := context.WithTimeout(context.Background(), time.Minute*2) + defer cancel() + + username, password, err := utils.GetAPICredentialsFromSecret(cfg) + if err != nil { + return err + } + _, _, err = client.Authenticate(ctx, username, password) + if err != nil { + return err + } + + // 3 - profit + vols, err := client.GetAllVolumes(ctx) + if err != nil { + return err + } + + fmt.Printf("Got %d volumes back\n", len(vols)) + + vols, err = client.GetAllVolumes(ctx) + if err != nil { + return err + } + + fmt.Printf("Got %d volumes back\n", len(vols)) + + // testing the use of the cached token + _, _, err = client.Authenticate(ctx, username, password) + if err != nil { + return err + } + + vols, err = client.GetAllVolumes(ctx) + if err != nil { + return err + } + + fmt.Printf("Got %d volumes back\n", len(vols)) + return nil + }, + } + + return cmd +} diff --git a/cmd/uninstall-portal.go b/cmd/uninstall-portal.go index 5c1c2112..51eeb211 100644 --- a/cmd/uninstall-portal.go +++ b/cmd/uninstall-portal.go @@ -61,7 +61,7 @@ func UninstallPortalCmd() *cobra.Command { cmd.Flags().String(installer.StosPortalClientSecretYamlFlag, "", "storageos-portal-manager-client-secret.yaml path or url") cmd.Flags().Bool(installer.AirGapFlag, false, "uninstall portal manger in an air gapped environment") - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) return cmd } @@ -110,7 +110,7 @@ func uninstallPortalCmd(config *apiv1.KubectlStorageOSConfig, log *logger.Logger } func setUninstallPortalValues(cmd *cobra.Command, config *apiv1.KubectlStorageOSConfig) error { - viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) + _ = viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) v := viper.GetViper() viper.SetConfigName("kubectl-storageos-config") viper.SetConfigType("yaml") diff --git a/cmd/uninstall.go b/cmd/uninstall.go index 91f203c3..42c2bd42 100644 --- a/cmd/uninstall.go +++ b/cmd/uninstall.go @@ -74,7 +74,7 @@ func UninstallCmd() *cobra.Command { cmd.Flags().String(installer.EtcdOperatorVersionFlag, "", "version of etcd operator to uninstall") cmd.Flags().String(installer.PortalManagerVersionFlag, "", "version of portal manager to uninstall") - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) return cmd } @@ -138,7 +138,7 @@ func uninstallCmd(config *apiv1.KubectlStorageOSConfig, skipNamespaceDeletionHas } func setUninstallValues(cmd *cobra.Command, config *apiv1.KubectlStorageOSConfig) error { - viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) + _ = viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) v := viper.GetViper() viper.SetConfigName("kubectl-storageos-config") viper.SetConfigType("yaml") diff --git 
a/cmd/upgrade.go b/cmd/upgrade.go index 6e85e33d..df4193a1 100644 --- a/cmd/upgrade.go +++ b/cmd/upgrade.go @@ -125,7 +125,7 @@ func UpgradeCmd() *cobra.Command { cmd.Flags().Bool(installer.EnableNodeGuardFlag, false, "enable node guard") cmd.Flags().String(installer.NodeGuardEnvFlag, "", "comma delimited string of environment variables for node guard - eg: \"MINIMUM_REPLICAS=2,WATCH_ALL_VOLUMES=true\"") - viper.BindPFlags(cmd.Flags()) + _ = viper.BindPFlags(cmd.Flags()) return cmd } @@ -185,7 +185,7 @@ func upgradeCmd(uninstallConfig *apiv1.KubectlStorageOSConfig, installConfig *ap } func setUpgradeInstallValues(cmd *cobra.Command, config *apiv1.KubectlStorageOSConfig) error { - viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) + _ = viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) v := viper.GetViper() viper.SetConfigName("kubectl-storageos-config") viper.SetConfigType("yaml") @@ -305,7 +305,7 @@ func setUpgradeInstallValues(cmd *cobra.Command, config *apiv1.KubectlStorageOSC } func setUpgradeUninstallValues(cmd *cobra.Command, config *apiv1.KubectlStorageOSConfig) error { - viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) + _ = viper.BindPFlag(installer.StosConfigPathFlag, cmd.Flags().Lookup(installer.StosConfigPathFlag)) v := viper.GetViper() viper.SetConfigName("kubectl-storageos-config") viper.SetConfigType("yaml") diff --git a/go.mod b/go.mod index b7bc987c..d8225982 100644 --- a/go.mod +++ b/go.mod @@ -37,6 +37,7 @@ replace ( require ( github.com/ahmetalpbalkan/go-cursor v0.0.0-20131010032410-8136607ea412 + github.com/antihax/optional v1.0.0 github.com/blang/semver v3.5.1+incompatible github.com/coreos/go-semver v0.3.0 github.com/fatih/color v1.13.0 @@ -44,6 +45,7 @@ require ( github.com/google/go-containerregistry v0.12.1 github.com/hashicorp/go-version v1.6.0 github.com/improbable-eng/etcd-cluster-operator v0.2.0 + github.com/kr/pretty v0.3.0 github.com/manifoldco/promptui v0.9.0 github.com/mattn/go-isatty v0.0.16 github.com/mholt/archiver v3.1.1+incompatible @@ -68,6 +70,11 @@ require ( sigs.k8s.io/yaml v1.3.0 ) +require ( + github.com/kr/text v0.2.0 // indirect + github.com/rogpeppe/go-internal v1.6.1 // indirect +) + require ( cloud.google.com/go v0.105.0 // indirect cloud.google.com/go/compute v1.12.1 // indirect @@ -87,6 +94,7 @@ require ( github.com/Microsoft/hcsshim v0.9.5 // indirect github.com/PuerkitoBio/purell v1.1.1 // indirect github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect + github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 github.com/andybalholm/brotli v1.0.4 // indirect github.com/aws/aws-sdk-go v1.43.16 // indirect github.com/beorn7/perks v1.0.1 // indirect @@ -205,6 +213,7 @@ require ( github.com/spf13/afero v1.8.2 // indirect github.com/spf13/cast v1.5.0 // indirect github.com/spf13/jwalterweatherman v1.1.0 // indirect + github.com/storageos/go-api v0.0.0-20221110132555-4ecf9f9b9429 github.com/subosito/gotenv v1.4.1 // indirect github.com/sylabs/sif/v2 v2.8.1 // indirect github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635 // indirect @@ -222,7 +231,7 @@ require ( golang.org/x/mod v0.6.0 // indirect golang.org/x/net v0.1.0 // indirect golang.org/x/oauth2 v0.1.0 // indirect - golang.org/x/sync v0.1.0 // indirect + golang.org/x/sync v0.1.0 golang.org/x/sys v0.1.0 // indirect golang.org/x/term v0.1.0 // indirect golang.org/x/text v0.4.0 // indirect diff --git a/go.sum 
b/go.sum index 84596250..2b1c3012 100644 --- a/go.sum +++ b/go.sum @@ -143,11 +143,14 @@ github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuy github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137 h1:s6gZFSlWYmbqAuRjVTiNNhvNRfY2Wxp9nhfyel4rklc= +github.com/alecthomas/units v0.0.0-20211218093645-b94a6e3cc137/go.mod h1:OMCwj8VM1Kc9e19TLln2VL61YJF0x1XFtfdL4JdbSyE= github.com/alexflint/go-filemutex v1.1.0/go.mod h1:7P4iRhttt/nUvUOrYIhcpMzv2G6CY9UnI16Z+UJqRyk= github.com/andybalholm/brotli v1.0.1/go.mod h1:loMXtMfwqflxFJPmdbJO0a3KNoPuLBgiu3qAvBg8x/Y= github.com/andybalholm/brotli v1.0.4 h1:V7DdXeJtZscaqfNuAdSRuRFzuiKlHSC/Zh3zl9qY3JY= github.com/andybalholm/brotli v1.0.4/go.mod h1:fO7iG3H7G2nSZ7m0zPUDn85XEX2GTukHGRSepvi9Eig= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= +github.com/antihax/optional v1.0.0 h1:xK2lYat7ZLaVVcIuj82J8kIro4V6kDe0AUDFboUCwcg= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= @@ -707,6 +710,7 @@ github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORN github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0= +github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= @@ -953,6 +957,7 @@ github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6So github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k= +github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww= github.com/russross/blackfriday v1.6.0/go.mod h1:ti0ldHuxg49ri4ksnFxlkCfN+hvslNlmVHqNRXXJNAY= @@ -1024,6 +1029,8 @@ github.com/stefanberger/go-pkcs11uri v0.0.0-20201008174630-78d3cae3a980/go.mod h github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/storageos/etcd-cluster-operator v0.3.0 h1:IAjRDwJcJQGcTZZsdMyM//2/pLe/A+oTGd0hVGbCww8= github.com/storageos/etcd-cluster-operator v0.3.0/go.mod h1:iNHxBZEgTG94hWcr6zwEEfv58dLUSnWtvIkiThampx8= +github.com/storageos/go-api v0.0.0-20221110132555-4ecf9f9b9429 
h1:dwZKmnSDyBhp0jI7HCGCy5HJ22ilNKwWh0HP1ybMwJ0= +github.com/storageos/go-api v0.0.0-20221110132555-4ecf9f9b9429/go.mod h1:8nTfqI+/wEmSs+gk/U1ZljZ+GnduPqi0HwIVXep9pjk= github.com/storageos/operator v0.0.0-20220620091939-c98630624350 h1:uclVTYQEVH49W8ZYAx2NIi3XNns6Osro18zN4wT/dQ4= github.com/storageos/operator v0.0.0-20220620091939-c98630624350/go.mod h1:oSB2cRdjHklzd/sgQb4V/KK/xXAfTriDC95+8SLsOpc= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= diff --git a/main.go b/main.go index f9aade68..b68b9d7b 100644 --- a/main.go +++ b/main.go @@ -57,6 +57,7 @@ func RootCmd() *cobra.Command { cobracmd.AddCommand(cmd.NfsCmd()) cobracmd.AddCommand(cmd.CordonCmd()) cobracmd.AddCommand(cmd.UncordonCmd()) + cobracmd.AddCommand(cmd.TestAPICmd()) cobracmd.AddCommand(cmd.CompletionCmd) return cobracmd diff --git a/model/cluster.go b/model/cluster.go new file mode 100644 index 00000000..b1a9621c --- /dev/null +++ b/model/cluster.go @@ -0,0 +1,46 @@ +package model + +import "time" + +// LogLevel is a typed wrapper around a cluster's log level configuration. +type LogLevel string + +// LogLevelFromString wraps level as a LogLevel. +func LogLevelFromString(level string) LogLevel { + return LogLevel(level) +} + +// String returns the string representation of l. +func (l LogLevel) String() string { + return string(l) +} + +// LogFormat is a typed wrapper around a cluster's log entry format +// configuration. +type LogFormat string + +// LogFormatFromString wraps format as a LogFormat. +func LogFormatFromString(format string) LogFormat { + return LogFormat(format) +} + +// String returns the string representation of f. +func (f LogFormat) String() string { + return string(f) +} + +// Resource encapsulate a StorageOS cluster api resource as a data type. +type Cluster struct { + ID string `json:"id"` + + DisableTelemetry bool `json:"disableTelemetry"` + DisableCrashReporting bool `json:"disableCrashReporting"` + DisableVersionCheck bool `json:"disableVersionCheck"` + + LogLevel LogLevel `json:"logLevel"` + LogFormat LogFormat `json:"logFormat"` + + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + Version string `json:"version"` +} diff --git a/model/licence.go b/model/licence.go new file mode 100644 index 00000000..c0b9d363 --- /dev/null +++ b/model/licence.go @@ -0,0 +1,42 @@ +package model + +import ( + "fmt" + "time" + + "github.com/alecthomas/units" +) + +// Resource describes a StorageOS product licence and the features included with +// it. +type License struct { + ClusterID string `json:"clusterID"` + ExpiresAt time.Time `json:"expiresAt"` + ClusterCapacityBytes uint64 `json:"clusterCapacityBytes"` + UsedBytes uint64 `json:"usedBytes"` + Kind string `json:"kind"` + CustomerName string `json:"customerName"` + Features []string `json:"features"` + Version string `json:"version"` +} + +func (l *License) String() string { + return fmt.Sprintf(`Cluster ID: %v +Expires at: %v +Cluster capacity: %v +Used Bytes: %v +Kind: %v +Customer name: %v +Features: %v +Version: %v +`, + l.ClusterID, + l.ExpiresAt.Format(time.RFC3339), + units.Base2Bytes(l.ClusterCapacityBytes).String(), + units.Base2Bytes(l.UsedBytes).String(), + l.Kind, + l.CustomerName, + l.Features, + l.Version, + ) +} diff --git a/model/namespace.go b/model/namespace.go new file mode 100644 index 00000000..cd240907 --- /dev/null +++ b/model/namespace.go @@ -0,0 +1,14 @@ +package model + +import "time" + +// Resource encapsulates a StorageOS namespace API resource as a data type. 
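The model types in this package carry json tags that mirror the API's wire format, which suggests they can be populated directly from an API response body. A small illustration using the Namespace type defined just below; the JSON literal is made up, and encoding/json plus this model package are assumed to be imported:

// decodeNamespace unmarshals a raw API response body into the model type.
func decodeNamespace(raw []byte) (*model.Namespace, error) {
	ns := &model.Namespace{}
	if err := json.Unmarshal(raw, ns); err != nil {
		return nil, err
	}
	return ns, nil
}

// decodeNamespace([]byte(`{"id":"ns-1","name":"default","version":"42"}`))
// yields a Namespace with ID "ns-1", Name "default" and Version "42".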
+type Namespace struct { + ID string `json:"id"` + Name string `json:"name"` + Labels map[string]string `json:"labels"` + + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + Version string `json:"version"` +} diff --git a/model/node.go b/model/node.go new file mode 100644 index 00000000..7fb57595 --- /dev/null +++ b/model/node.go @@ -0,0 +1,37 @@ +package model + +import ( + "time" + + "github.com/storageos/kubectl-storageos/pkg/health" +) + +// Stats struct for CapacityStats +type Stats struct { + // Total bytes in the filesystem + Total uint64 `json:"total,omitempty"` + // Free bytes in the filesystem available to root user + Free uint64 `json:"free,omitempty"` +} + +// Resource encapsulates a StorageOS node API resource as a data type. +type Node struct { + ID string `json:"id"` + Name string `json:"name"` + Health health.NodeState `json:"health"` + Capacity Stats `json:"capacity,omitempty"` + + IOAddr string `json:"ioAddress"` + SupervisorAddr string `json:"supervisorAddress"` + GossipAddr string `json:"gossipAddress"` + ClusteringAddr string `json:"clusteringAddress"` + + Cordoned bool `json:"cordoned"` + CordonedAt time.Time `json:"cordonedAt"` + + Labels map[string]string `json:"labels"` + + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + Version string `json:"version"` +} diff --git a/model/policygroup.go b/model/policygroup.go new file mode 100644 index 00000000..f3ada5e6 --- /dev/null +++ b/model/policygroup.go @@ -0,0 +1,29 @@ +package model + +import "time" + +// Resource encapsulates a StorageOS policy group API resource as a data type. +type PolicyGroup struct { + ID string `json:"id"` + Name string `json:"name"` + Users []*PolicyGroupMember `json:"users"` + Specs []*PolicyGroupSpec `json:"specs"` + + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + Version string `json:"version"` +} + +// PolicyGroupSpec encapsulates a policy specification API resource belonging to a policy +// group as a data type. +type PolicyGroupSpec struct { + NamespaceID string `json:"namespaceID"` + ResourceType string `json:"resourceType"` + ReadOnly bool `json:"readOnly"` +} + +// PolicyGroupMember represents the details of a user that is a member of a policy group. +type PolicyGroupMember struct { + ID string `json:"id"` + Username string `json:"username"` +} diff --git a/model/user.go b/model/user.go new file mode 100644 index 00000000..980d4b9f --- /dev/null +++ b/model/user.go @@ -0,0 +1,16 @@ +package model + +import "time" + +// Resource encapsulates a StorageOS user API resource as data type. +type User struct { + ID string `json:"id"` + Username string `json:"name"` + + IsAdmin bool `json:"isAdmin"` + Groups []string `json:"groups"` + + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + Version string `json:"version"` +} diff --git a/model/volume.go b/model/volume.go new file mode 100644 index 00000000..e0b982e1 --- /dev/null +++ b/model/volume.go @@ -0,0 +1,181 @@ +package model + +import ( + "strconv" + "strings" + "time" + + "github.com/storageos/kubectl-storageos/pkg/health" +) + +const ( + // LabelNoCache is a StorageOS volume label which when enabled disables the + // caching of volume data. + LabelNoCache = "storageos.com/nocache" + // LabelNoCompress is a StorageOS volume label which when enabled disables the + // compression of volume data (both at rest and during transit). 
+ LabelNoCompress = "storageos.com/nocompress" + // LabelReplicas is a StorageOS volume label which decides how many replicas + // must be provisioned for that volume. + LabelReplicas = "storageos.com/replicas" + // LabelThrottle is a StorageOS volume label which when enabled deprioritises + // the volume's traffic by reducing disk I/O rate. + LabelThrottle = "storageos.com/throttle" +) + +// FsType indicates the kind of filesystem which a volume has been given. +type FsType string + +// String returns the name string for fs. +func (fs FsType) String() string { + return string(fs) +} + +// FsTypeFromString wraps name as an FsType. It doesn't perform validity +// checks. +func FsTypeFromString(name string) FsType { + return FsType(name) +} + +// AttachType The attachment type of a volume. "host" indicates that the volume +// is consumed by the node it is attached to. +type AttachType string + +// List of AttachType +const ( + AttachTypeUnknown AttachType = "unknown" + AttachTypeDetached AttachType = "detached" + AttachTypeNFS AttachType = "nfs" + AttachTypeHost AttachType = "host" +) + +// AttachTypeFromString wraps name as an AttachType. It doesn't perform validity +// checks. +func AttachTypeFromString(name string) AttachType { + return AttachType(name) +} + +// String returns the string representation of the current AttachType +func (a AttachType) String() string { + return string(a) +} + +// Resource encapsulates a StorageOS volume API resource as a data type. +type Volume struct { + ID string `json:"id"` + Name string `json:"name"` + Description string `json:"description"` + AttachedOn string `json:"attachedOn"` + AttachmentType AttachType `json:"attachmentType"` + Nfs NFSConfig `json:"nfs"` + + Namespace string `json:"namespaceID"` + Labels map[string]string `json:"labels"` + TopologyLabels TopologyLabels `json:"topologyLabels"` + Filesystem FsType `json:"filesystem"` + SizeBytes uint64 `json:"sizeBytes"` + + Master *Deployment `json:"master"` + Replicas []*Deployment `json:"replicas"` + + CreatedAt time.Time `json:"createdAt"` + UpdatedAt time.Time `json:"updatedAt"` + Version string `json:"version"` +} + +type TopologyLabels struct { + RequisiteZones []string + PreferredZones []string +} + +func (t *TopologyLabels) String() (string, string) { + return strings.Join(t.RequisiteZones, ", "), strings.Join(t.PreferredZones, ", ") +} + +// Deployment encapsulates a deployment instance for a +// volume as a data type. +type Deployment struct { + ID string `json:"id"` + Node string `json:"nodeID"` + Health health.VolumeState `json:"health"` + Promotable bool `json:"promotable"` + SyncProgress *SyncProgress `json:"syncProgress,omitempty"` +} + +// SyncProgress is a point-in-time snapshot of an ongoing sync operation. +type SyncProgress struct { + BytesRemaining uint64 `json:"bytesRemaining"` + ThroughputBytes uint64 `json:"throughputBytes"` + EstimatedSecondsRemaining uint64 `json:"estimatedSecondsRemaining"` +} + +// NFSConfig contains a config for NFS attaching containing and endpoint and a +// list of exports. +type NFSConfig struct { + Exports []NFSExportConfig `json:"exports"` + ServiceEndpoint string `json:"serviceEndpoint"` +} + +// NFSExportConfig contains a single export configuration for NFS attaching. +type NFSExportConfig struct { + ExportID uint `json:"exportID"` + Path string `json:"path"` + PseudoPath string `json:"pseudoPath"` + ACLs []NFSExportConfigACL `json:"acls"` +} + +// NFSExportConfigACL contains a single ACL policy for NFS attaching export +// configuration. 
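The storageos.com/* label constants at the top of this file are ordinary entries in a volume's label map; the boolean ones are read back by the helpers further down via strconv.ParseBool. For example, a label set asking for two replicas with caching disabled could be built like this (values are illustrative):

// labels is then passed as the labels argument of CreateVolume or
// UpdateVolumeLabels on the API client.
labels := map[string]string{
	model.LabelReplicas: "2",    // provision two replicas for the volume
	model.LabelNoCache:  "true", // disable caching of volume data
}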
+type NFSExportConfigACL struct { + Identity NFSExportConfigACLIdentity `json:"identity"` + SquashConfig NFSExportConfigACLSquashConfig `json:"squashConfig"` + AccessLevel string `json:"accessLevel"` +} + +// NFSExportConfigACLIdentity contains identity info for an ACL in a NFS export +// config. +type NFSExportConfigACLIdentity struct { + IdentityType string `json:"identityType"` + Matcher string `json:"matcher"` +} + +// NFSExportConfigACLSquashConfig contains squash info for an ACL in a NFS +// export config. +type NFSExportConfigACLSquashConfig struct { + GID int64 `json:"gid"` + UID int64 `json:"uid"` + Squash string `json:"squash"` +} + +// IsCachingDisabled returns if the volume resource is configured to disable +// caching of data. +func (r *Volume) IsCachingDisabled() (bool, error) { + value, exists := r.Labels[LabelNoCache] + if !exists { + return false, nil + } + + return strconv.ParseBool(value) +} + +// IsCompressionDisabled returns if the volume resource is configured to disable +// compression of data at rest and during transit. +func (r *Volume) IsCompressionDisabled() (bool, error) { + value, exists := r.Labels[LabelNoCompress] + if !exists { + return false, nil + } + + return strconv.ParseBool(value) +} + +// IsThrottleEnabled returns if the volume resource is configured to have its +// traffic deprioritised by reducing its disk I/O rate. +func (r *Volume) IsThrottleEnabled() (bool, error) { + value, exists := r.Labels[LabelThrottle] + if !exists { + return false, nil + } + + return strconv.ParseBool(value) +} diff --git a/pkg/atomicfile/atomicfile.go b/pkg/atomicfile/atomicfile.go new file mode 100644 index 00000000..b0bba64a --- /dev/null +++ b/pkg/atomicfile/atomicfile.go @@ -0,0 +1,87 @@ +// Package atomicfile exports a mechanism for transactional writes to a file. +package atomicfile + +import ( + "errors" + "os" + "path/filepath" +) + +var errCloseDisallowed = errors.New("cannot call close on transactional write file, must commit or abort") + +// Write adds transactional write behaviour to an *os.File. +// +// It extends the method set to include Commit() and Abort(), but blocks direct +// calls to Close(). +type Write struct { + *os.File + + targetPath string +} + +// NewWrite creates a temporary file in the same directory as the destination +// file. +// +// The same directory is used for the temp file because cross-filesystem +// renames are not atomic and it is possible that the OS temp directory is +// mounted on a different filesystem to targetPath. +// +// The returned Write may be used as a normal file, except it must be committed +// to save to targetPath once any writing is finished. +func NewWrite(targetPath string) (*Write, error) { + targetDir, fileName := filepath.Split(targetPath) + + tempFile, err := os.CreateTemp(targetDir, fileName+"-txn-*.json") + if err != nil { + return nil, err + } + + return &Write{ + File: tempFile, + + targetPath: targetPath, + }, nil +} + +// Abort closes the staged transaction file, removing it from disk. This +// discards any data written to the transaction file. +func (txn *Write) Abort() error { + err := txn.File.Close() + if err != nil { + return err + } + + err = os.Remove(txn.File.Name()) + if err != nil { + return err + } + + return nil +} + +// Commit flushes any pending writes to the transaction file, atomically +// writes it to the target file path and then closes it. 
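A typical caller of the atomicfile package writes to the returned *Write as a normal file, calls Abort on any error path and Commit on success, so readers of the target path only ever see either the old contents or the complete new contents. A sketch of such a caller; writeConfigAtomically is a hypothetical helper, assuming this package is imported:

func writeConfigAtomically(path string, payload []byte) error {
	w, err := atomicfile.NewWrite(path)
	if err != nil {
		return err
	}

	if _, err := w.Write(payload); err != nil {
		// Discard the temp file; the original target is untouched.
		_ = w.Abort()
		return err
	}

	// Flush, rename over the target and close the temp file.
	return w.Commit()
}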
+func (txn *Write) Commit() error { + err := txn.File.Sync() + if err != nil { + return err + } + + err = os.Rename(txn.File.Name(), txn.targetPath) + if err != nil { + return err + } + + err = txn.File.Close() + if err != nil { + return err + } + + return nil +} + +// Close will always error. Allowing consumers of a transaction to +// independently close the file breaks the transactional semantics. +func (txn *Write) Close() error { + return errCloseDisallowed +} diff --git a/pkg/consts/consts.go b/pkg/consts/consts.go index 893d6b9a..c78e28b4 100644 --- a/pkg/consts/consts.go +++ b/pkg/consts/consts.go @@ -23,4 +23,6 @@ const ( VersionRegex = "v?([0-9]+.[0-9]+.[0-9]+)" ShaVersionRegex = "^[a-fA-F0-9]+$" + + DefaultStorageOSEndpoint = "http://localhost:5705" ) diff --git a/pkg/health/node_state.go b/pkg/health/node_state.go new file mode 100644 index 00000000..92a32b5e --- /dev/null +++ b/pkg/health/node_state.go @@ -0,0 +1,31 @@ +package health + +import openapi "github.com/storageos/go-api/autogenerated" + +// NodeState represents the health state in which a node could be +type NodeState string + +// Are all States a node could be. +const ( + NodeOnline NodeState = NodeState(openapi.NODEHEALTH_ONLINE) + NodeOffline = NodeState(openapi.NODEHEALTH_OFFLINE) + NodeUnknown = NodeState(openapi.NODEHEALTH_UNKNOWN) +) + +// NodeFromString parses a string and return the matching node state. +// If the string is not recognized, Unknown is returned +func NodeFromString(s string) NodeState { + switch s { + case string(openapi.NODEHEALTH_ONLINE): + return NodeOnline + case string(openapi.NODEHEALTH_OFFLINE): + return NodeOffline + default: + return NodeUnknown + } +} + +// String returns the string representation of the State +func (n NodeState) String() string { + return string(n) +} diff --git a/pkg/health/volume_state.go b/pkg/health/volume_state.go new file mode 100644 index 00000000..8168eeea --- /dev/null +++ b/pkg/health/volume_state.go @@ -0,0 +1,64 @@ +package health + +import ( + openapi "github.com/storageos/go-api/autogenerated" +) + +// VolumeState represents the health state in which a volume could be +type VolumeState string + +// All States a node could be. +const ( + ReplicaRecovering VolumeState = VolumeState(openapi.REPLICAHEALTH_RECOVERING) + ReplicaProvisioning = VolumeState(openapi.REPLICAHEALTH_PROVISIONING) + ReplicaProvisioned = VolumeState(openapi.REPLICAHEALTH_PROVISIONED) + ReplicaSyncing = VolumeState(openapi.REPLICAHEALTH_SYNCING) + ReplicaReady = VolumeState(openapi.REPLICAHEALTH_READY) + ReplicaDeleted = VolumeState(openapi.REPLICAHEALTH_DELETED) + ReplicaFailed = VolumeState(openapi.REPLICAHEALTH_FAILED) + ReplicaUnknown = VolumeState(openapi.REPLICAHEALTH_UNKNOWN) + MasterOnline = VolumeState(openapi.MASTERHEALTH_ONLINE) + MasterOffline = VolumeState(openapi.MASTERHEALTH_OFFLINE) + MasterUnknown = VolumeState(openapi.MASTERHEALTH_UNKNOWN) +) + +// ReplicaFromString returns the replica State matching the string in input. +// If the string doesn't match any of the known state, unknown is returned. 
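Because the health states are typed constants rather than raw strings, call sites can compare and count them directly. A hypothetical helper over the volume model, for illustration only (assumes the model and health packages are imported):

// readyReplicas counts the replica deployments of a volume that report the
// ready state.
func readyReplicas(v *model.Volume) int {
	n := 0
	for _, r := range v.Replicas {
		if r.Health == health.ReplicaReady {
			n++
		}
	}
	return n
}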
+func ReplicaFromString(s string) VolumeState { + switch s { + case string(openapi.REPLICAHEALTH_RECOVERING): + return ReplicaRecovering + case string(openapi.REPLICAHEALTH_PROVISIONING): + return ReplicaProvisioning + case string(openapi.REPLICAHEALTH_PROVISIONED): + return ReplicaProvisioned + case string(openapi.REPLICAHEALTH_SYNCING): + return ReplicaSyncing + case string(openapi.REPLICAHEALTH_READY): + return ReplicaReady + case string(openapi.REPLICAHEALTH_DELETED): + return ReplicaDeleted + case string(openapi.REPLICAHEALTH_FAILED): + return ReplicaFailed + default: + return ReplicaUnknown + } +} + +// MasterFromString returns the master State matching the string in input. +// If the string doesn't match any of the known state, unknown is returned. +func MasterFromString(s string) VolumeState { + switch s { + case string(openapi.MASTERHEALTH_ONLINE): + return MasterOnline + case string(openapi.MASTERHEALTH_OFFLINE): + return MasterOffline + default: + return MasterUnknown + } +} + +// String returns the string representation of the State +func (v VolumeState) String() string { + return string(v) +} diff --git a/pkg/utils/k8s.go b/pkg/utils/k8s.go index efff0f04..3cc3209c 100644 --- a/pkg/utils/k8s.go +++ b/pkg/utils/k8s.go @@ -5,7 +5,9 @@ import ( "context" "fmt" "io" + "net/url" "regexp" + "strconv" "strings" "time" @@ -568,6 +570,69 @@ func GetFirstStorageOSCluster(config *rest.Config) (*operatorapi.StorageOSCluste return stosCluster, nil } +func GetFirstStorageOSNodePod(config *rest.Config, namespace string) (*corev1.Pod, error) { + nodePodList, err := ListPods(config, namespace, "app.kubernetes.io/component=control-plane") + if err != nil { + return nil, err + } + + if len(nodePodList.Items) == 0 { + return nil, errors.WithStack(fmt.Errorf("no storageos-node pods found")) + } + + for _, pod := range nodePodList.Items { + if pod.Status.Phase == corev1.PodRunning { + return &pod, nil + } + } + + return nil, errors.WithStack(fmt.Errorf("no storageos-node pod in running phase")) +} + +func GetFirstStorageOSAPIEndpoint(config *rest.Config) (string, error) { + stosCluster, err := GetFirstStorageOSCluster(config) + if err != nil { + return "", err + } + + // the cluster namespace can be used to fetch the storageos-node pods as they are always the same. + pod, err := GetFirstStorageOSNodePod(config, stosCluster.Namespace) + if err != nil { + return "", err + } + + for _, container := range pod.Spec.Containers { + if container.Name == "storageos" { + for _, port := range container.Ports { + if port.Name == "api" { + endpoint, err := url.Parse("http://" + pod.Status.HostIP + ":" + strconv.Itoa(int(port.HostPort))) + if err != nil { + return "", fmt.Errorf("error parsing storageos api endpoint url") + } + return endpoint.String(), nil + } + } + } + } + + return "", fmt.Errorf("unable to get storageos api endpoint") +} + +func GetAPICredentialsFromSecret(config *rest.Config) (string, string, error) { + stosCluster, err := GetFirstStorageOSCluster(config) + if err != nil { + return "", "", err + } + + // the cluster namespace can be used to fetch the secret as they are always the same. + secret, err := GetSecret(config, stosCluster.Spec.SecretRefName, stosCluster.Namespace) + if err != nil { + return "", "", err + } + + return string(secret.Data["username"]), string(secret.Data["password"]), nil +} + func storageOSOperatorClient(config *rest.Config) (client.Client, error) { scheme := runtime.NewScheme() if err := operatorapi.AddToScheme(scheme); err != nil {
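Taken together, GetFirstStorageOSAPIEndpoint and GetAPICredentialsFromSecret give a command everything it needs to reach the cluster's API without extra flags: a reachable node endpoint discovered from a running storageos-node pod, and the admin credentials stored in the cluster's secret. A hedged sketch of calling them directly, using the same utils.NewClientConfig kubeconfig loader that cmd/testapi.go uses; printAPITarget is hypothetical and assumes fmt and the utils package are imported:

func printAPITarget() error {
	cfg, err := utils.NewClientConfig()
	if err != nil {
		return err
	}

	endpoint, err := utils.GetFirstStorageOSAPIEndpoint(cfg)
	if err != nil {
		return err
	}

	username, _, err := utils.GetAPICredentialsFromSecret(cfg)
	if err != nil {
		return err
	}

	fmt.Printf("StorageOS API at %s, authenticating as %q\n", endpoint, username)
	return nil
}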