Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Storage: Add support for storage bucket backup (from Incus) #13924

Open
wants to merge 21 commits into
base: main
Choose a base branch
from
Open
Show file tree
Hide file tree
Changes from 19 commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
16 changes: 16 additions & 0 deletions client/interfaces.go
Original file line number Diff line number Diff line change
Expand Up @@ -361,6 +361,12 @@ type InstanceServer interface {
UpdateStoragePoolBucketKey(poolName string, bucketName string, keyName string, key api.StorageBucketKeyPut, ETag string) (err error)
DeleteStoragePoolBucketKey(poolName string, bucketName string, keyName string) (err error)

// Storage bucket backup functions ("storage_bucket_backup" API extension)
CreateStoragePoolBucketBackup(poolName string, bucketName string, backup api.StorageBucketBackupsPost) (op Operation, err error)
DeleteStoragePoolBucketBackup(pool string, bucketName string, name string) (op Operation, err error)
GetStoragePoolBucketBackupFile(pool string, bucketName string, name string, req *BackupFileRequest) (resp *BackupFileResponse, err error)
CreateStoragePoolBucketFromBackup(pool string, args StoragePoolBucketBackupArgs) (op Operation, err error)

// List all volumes functions ("storage_volumes_all" API extension)
GetVolumesWithFilter(filters []string) (volumes []api.StorageVolume, err error)
GetVolumesWithFilterAllProjects(filters []string) (volumes []api.StorageVolume, err error)
Expand Down Expand Up @@ -751,3 +757,13 @@ type GetPermissionsArgs struct {
// level permissions will not be returned.
ProjectName string
}

// The StoragePoolBucketBackupArgs struct is used when creating a storage bucket from a backup.
// API extension: storage_bucket_backup.
type StoragePoolBucketBackupArgs struct {
	// BackupFile is the raw backup file contents streamed to the server.
	BackupFile io.Reader

	// Name is the bucket name to import the backup as (sent via the
	// X-LXD-name header; the server picks a name when empty).
	Name string
}
156 changes: 156 additions & 0 deletions client/lxd_storage_buckets.go
Original file line number Diff line number Diff line change
@@ -1,7 +1,15 @@
package lxd

import (
"fmt"
"io"
"net/http"
"net/url"

"github.com/canonical/lxd/shared/api"
"github.com/canonical/lxd/shared/cancel"
"github.com/canonical/lxd/shared/ioprogress"
"github.com/canonical/lxd/shared/units"
)

// GetStoragePoolBucketNames returns a list of storage bucket names.
Expand Down Expand Up @@ -233,3 +241,151 @@ func (r *ProtocolLXD) DeleteStoragePoolBucketKey(poolName string, bucketName str

return nil
}

// CreateStoragePoolBucketBackup creates a new storage bucket backup.
func (r *ProtocolLXD) CreateStoragePoolBucketBackup(poolName string, bucketName string, backup api.StorageBucketBackupsPost) (Operation, error) {
	if err := r.CheckExtension("storage_bucket_backup"); err != nil {
		return nil, err
	}

	// Request the backup creation through the bucket's backups collection endpoint.
	endpoint := fmt.Sprintf("/storage-pools/%s/buckets/%s/backups", url.PathEscape(poolName), url.PathEscape(bucketName))

	op, _, err := r.queryOperation("POST", endpoint, backup, "", true)
	if err != nil {
		return nil, err
	}

	return op, nil
}

// DeleteStoragePoolBucketBackup deletes an existing storage bucket backup.
func (r *ProtocolLXD) DeleteStoragePoolBucketBackup(pool string, bucketName string, name string) (Operation, error) {
err := r.CheckExtension("storage_bucket_backup")
if err != nil {
return nil, err
}

op, _, err := r.queryOperation("DELETE", fmt.Sprintf("/storage-pools/%s/buckets/%s/backups/%s", url.PathEscape(pool), url.PathEscape(bucketName), url.PathEscape(name)), nil, "", true)
if err != nil {
return nil, err
}

return op, nil
}

// GetStoragePoolBucketBackupFile returns the storage bucket file.
func (r *ProtocolLXD) GetStoragePoolBucketBackupFile(pool string, bucketName string, name string, req *BackupFileRequest) (*BackupFileResponse, error) {
err := r.CheckExtension("storage_bucket_backup")
if err != nil {
return nil, err
}

// Build the URL
uri := fmt.Sprintf("%s/1.0/storage-pools/%s/buckets/%s/backups/%s/export", r.httpBaseURL.String(), url.PathEscape(pool), url.PathEscape(bucketName), url.PathEscape(name))

if r.project != "" {
uri += fmt.Sprintf("?project=%s", url.QueryEscape(r.project))
}

// Prepare the download request
request, err := http.NewRequest("GET", uri, nil)
if err != nil {
return nil, err
}

if r.httpUserAgent != "" {
request.Header.Set("User-Agent", r.httpUserAgent)
}

// Start the request
response, doneCh, err := cancel.CancelableDownload(req.Canceler, r.DoHTTP, request)
if err != nil {
return nil, err
}

defer func() { _ = response.Body.Close() }()
defer close(doneCh)

if response.StatusCode != http.StatusOK {
_, _, err := lxdParseResponse(response)
if err != nil {
return nil, err
}
}

// Handle the data
body := response.Body
if req.ProgressHandler != nil {
body = &ioprogress.ProgressReader{
ReadCloser: response.Body,
Tracker: &ioprogress.ProgressTracker{
Length: response.ContentLength,
Handler: func(percent int64, speed int64) {
req.ProgressHandler(ioprogress.ProgressData{Text: fmt.Sprintf("%d%% (%s/s)", percent, units.GetByteSizeString(speed, 2))})
},
},
}
}

size, err := io.Copy(req.BackupFile, body)
if err != nil {
return nil, err
}

resp := BackupFileResponse{}
resp.Size = size

return &resp, nil
}

// CreateStoragePoolBucketFromBackup creates a storage pool bucket using a backup.
func (r *ProtocolLXD) CreateStoragePoolBucketFromBackup(pool string, args StoragePoolBucketBackupArgs) (Operation, error) {
if !r.HasExtension("storage_bucket_backup") {
return nil, fmt.Errorf(`The server is missing the required "custom_volume_backup" API extension`)
}

path := fmt.Sprintf("/storage-pools/%s/buckets", url.PathEscape(pool))

// Prepare the HTTP request.
reqURL, err := r.setQueryAttributes(fmt.Sprintf("%s/1.0%s", r.httpBaseURL.String(), path))
if err != nil {
return nil, err
}

req, err := http.NewRequest("POST", reqURL, args.BackupFile)
if err != nil {
return nil, err
}

req.Header.Set("Content-Type", "application/octet-stream")

if args.Name != "" {
req.Header.Set("X-LXD-name", args.Name)
}

// Send the request.
resp, err := r.DoHTTP(req)
if err != nil {
return nil, err
}

defer func() { _ = resp.Body.Close() }()

// Handle errors.
response, _, err := lxdParseResponse(resp)
if err != nil {
return nil, err
}

respOperation, err := response.MetadataAsOperation()
if err != nil {
return nil, err
}

op := operation{
Operation: *respOperation,
r: r,
chActive: make(chan bool),
}

return &op, nil
}
15 changes: 15 additions & 0 deletions doc/api-extensions.md
Original file line number Diff line number Diff line change
Expand Up @@ -2542,3 +2542,18 @@ This introduces the configuration keys {config:option}`storage-ceph-pool-conf:ce
## `network_get_target`

Adds optional `target` parameter to `GET /1.0/network`. When target is set, forward the request to the specified cluster member and return the non-managed interfaces from that member.

## `storage_bucket_backup`

Add storage bucket backup support.

This includes the following new endpoints (see [RESTful API](rest-api.md) for details):

* `GET /1.0/storage-pools/<pool>/buckets/<bucket>/backups`
* `POST /1.0/storage-pools/<pool>/buckets/<bucket>/backups`

* `GET /1.0/storage-pools/<pool>/buckets/<bucket>/backups/<name>`
* `POST /1.0/storage-pools/<pool>/buckets/<bucket>/backups/<name>`
* `DELETE /1.0/storage-pools/<pool>/buckets/<bucket>/backups/<name>`

* `GET /1.0/storage-pools/<pool>/buckets/<bucket>/backups/<name>/export`
3 changes: 3 additions & 0 deletions doc/metadata.txt
Original file line number Diff line number Diff line change
Expand Up @@ -6577,6 +6577,9 @@ using the `zfs` command in the container.
`can_view`
: Grants permission to view the storage bucket.

`can_manage_backups`
: Grants permission to create and delete backups of the storage bucket.


<!-- entity group storage_bucket end -->
<!-- entity group storage_pool start -->
Expand Down
Loading