Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Expose build status endpoint #18

Merged
merged 10 commits into from
Feb 13, 2024
21 changes: 19 additions & 2 deletions README.md
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
# go-softpack-builder (gsb)
Go implementation of a softpack builder service.

After receiving a POST (see Testing section below) with desired environment
details, this service does the following:
After receiving a POST to `/environments/build` (see Testing section below) with
desired environment details, this service does the following:

1. A singularity definition file, singularity.def, is created and uploaded to
an environment-specific subdirectory of your S3 build location.
Expand Down Expand Up @@ -43,6 +43,23 @@ details, this service does the following:
It can be reproduced exactly at any time using the singularity.def, assuming
you configure specific images (ie. not :latest) to use.

After receiving a GET to `/environments/status`, this service returns a JSON
response with the following structure:

```json
[
{
"Name": "users/foo/bar",
"Requested": "2024-02-12T11:58:49.808672303Z",
"BuildStart": "2024-02-12T11:58:55.430080969Z",
"BuildDone": "2024-02-12T11:59:00.532174828Z"
}
]
```

Each time is either a quoted string in RFC 3339 format with sub-second
precision, or null if that stage of the build has not yet happened.

## Initial setup

You'll need an S3 bucket to be a binary cache, which needs GPG keys. Here's one
Expand Down
185 changes: 134 additions & 51 deletions build/builder.go
Original file line number Diff line number Diff line change
@@ -1,5 +1,5 @@
/*******************************************************************************
* Copyright (c) 2023 Genome Research Ltd.
* Copyright (c) 2023, 2024 Genome Research Ltd.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
Expand Down Expand Up @@ -39,22 +39,17 @@ import (
"strings"
"sync"
"text/template"
"time"

"github.com/wtsi-hgi/go-softpack-builder/config"
"github.com/wtsi-hgi/go-softpack-builder/git"
"github.com/wtsi-hgi/go-softpack-builder/internal"
"github.com/wtsi-hgi/go-softpack-builder/internal/core"
"github.com/wtsi-hgi/go-softpack-builder/s3"
"github.com/wtsi-hgi/go-softpack-builder/wr"
)

const (
SingularityDefBasename = "singularity.def"
ExesBasename = "executables"
SoftpackYaml = "softpack.yml"
SpackLockFile = "spack.lock"
BuilderOut = "builder.out"
moduleForCoreBasename = "module"
UsageBasename = "README.md"

uploadEndpoint = "/upload"
)

Expand All @@ -71,18 +66,14 @@ func init() { //nolint:gochecknoinits
softpackTmpl = template.Must(template.New("").Parse(softpackTmplStr))
}

type Error string

func (e Error) Error() string { return string(e) }

const (
ErrInvalidJSON = Error("invalid spack lock JSON")
ErrEnvironmentBuilding = Error("build already running for environment")
ErrInvalidJSON = internal.Error("invalid spack lock JSON")
ErrEnvironmentBuilding = internal.Error("build already running for environment")

ErrInvalidEnvPath = Error("invalid environment path")
ErrInvalidVersion = Error("environment version required")
ErrNoPackages = Error("packages required")
ErrNoPackageName = Error("package names required")
ErrInvalidEnvPath = internal.Error("invalid environment path")
ErrInvalidVersion = internal.Error("environment version required")
ErrNoPackages = internal.Error("packages required")
ErrNoPackageName = internal.Error("package names required")
)

// Package describes the name and optional version of a spack package.
Expand Down Expand Up @@ -181,36 +172,70 @@ func (d *Definition) Validate() error {
return d.Packages.Validate()
}

// S3 describes the S3 operations the Builder needs: uploading data to a
// destination path, and opening a previously stored file for reading.
// Implementations are normally provided by the s3 package.
type S3 interface {
	// UploadData streams data to the given destination path.
	UploadData(data io.Reader, dest string) error

	// OpenFile opens the file at the given source path for reading; the
	// caller is responsible for closing the returned ReadCloser.
	OpenFile(source string) (io.ReadCloser, error)
}

// Runner describes the job-running operations the Builder needs.
// Implementations are normally provided by the wr package.
type Runner interface {
	// Add submits the given deployment as a job and returns its ID.
	Add(deployment string) (string, error)

	// WaitForRunning blocks until the job with the given ID starts running.
	WaitForRunning(id string) error

	// Wait blocks until the job with the given ID completes, returning its
	// final status.
	Wait(id string) (wr.WRJobStatus, error)

	// Status returns the current status of the job with the given ID.
	Status(id string) (wr.WRJobStatus, error)
}

// Status describes the status of an individual build: when it was requested,
// when it started actually being built, and when its build finished. The
// time pointers are nil until the corresponding event has occurred.
type Status struct {
	Name       string
	Requested  *time.Time
	BuildStart *time.Time
	BuildDone  *time.Time
}

// Builder lets you do builds given config, S3 and a wr runner.
type Builder struct {
config *config.Config
s3 interface {
UploadData(data io.Reader, dest string) error
OpenFile(source string) (io.ReadCloser, error)
}
runner interface {
Run(deployment string) error
}
s3 S3
runner Runner

mu sync.Mutex
runningEnvironments map[string]bool
postBuild func()
postBuildMu sync.RWMutex

postBuildMu sync.RWMutex
postBuild func()

statusMu sync.RWMutex
statuses map[string]*Status

runnerPollInterval time.Duration
}

// New takes the s3 build cache URL, the repo and checkout reference of your
// custom spack repo, and returns a Builder.
func New(config *config.Config) (*Builder, error) {
s3helper, err := s3.New(config.S3.BuildBase)
if err != nil {
return nil, err
// custom spack repo, and returns a Builder. Optionally, supply objects that
// satisfy the S3 and Runner interfaces; if nil, these default to using the s3
// and wr packages.
func New(config *config.Config, s3helper S3, runner Runner) (*Builder, error) {
if s3helper == nil {
var err error

s3helper, err = s3.New(config.S3.BuildBase)
if err != nil {
return nil, err
}
}

if runner == nil {
runner = wr.New(config.WRDeployment)
}

return &Builder{
config: config,
s3: s3helper,
runner: wr.New(config.WRDeployment),
runner: runner,
runningEnvironments: make(map[string]bool),
statuses: make(map[string]*Status),
runnerPollInterval: 1 * time.Second,
}, nil
}

Expand All @@ -233,10 +258,26 @@ func (b *Builder) SetPostBuildCallback(cb func()) {
b.postBuild = cb
}

// Status returns a snapshot of the status of all known builds. The returned
// slice holds copies of the tracked statuses, so callers may inspect it
// without any further locking.
func (b *Builder) Status() []Status {
	b.statusMu.RLock()
	defer b.statusMu.RUnlock()

	snapshot := make([]Status, 0, len(b.statuses))
	for _, s := range b.statuses {
		snapshot = append(snapshot, *s)
	}

	return snapshot
}

// Build uploads a singularity.def generated by GenerateSingularityDef() to S3
// and adds a job to wr to build the image. You'll need a wr manager running
// that can run jobs with root and access the S3, ie. a cloud deployment.
func (b *Builder) Build(def *Definition) (err error) {
b.buildStatus(def)

var fn func()

fn, err = b.protectEnvironment(def.FullEnvironmentPath(), &err)
Expand Down Expand Up @@ -268,6 +309,26 @@ func (b *Builder) Build(def *Definition) (err error) {
return nil
}

// buildStatus returns the Status entry tracking def's environment, creating
// and recording a new one (with Requested set to now) if none exists yet.
func (b *Builder) buildStatus(def *Definition) *Status {
	b.statusMu.Lock()
	defer b.statusMu.Unlock()

	// Builds are keyed by path/name-version.
	key := filepath.Join(def.EnvironmentPath, def.EnvironmentName) + "-" + def.EnvironmentVersion

	if existing, ok := b.statuses[key]; ok {
		return existing
	}

	requested := time.Now()
	created := &Status{
		Name:      key,
		Requested: &requested,
	}
	b.statuses[key] = created

	return created
}

func (b *Builder) protectEnvironment(envPath string, err *error) (func(), error) {
b.mu.Lock()

Expand Down Expand Up @@ -300,7 +361,7 @@ func (b *Builder) generateAndUploadSingularityDef(def *Definition, s3Path string
return "", err
}

singDefUploadPath := filepath.Join(s3Path, SingularityDefBasename)
singDefUploadPath := filepath.Join(s3Path, core.SingularityDefBasename)

err = b.s3.UploadData(strings.NewReader(singDef), singDefUploadPath)

Expand Down Expand Up @@ -340,7 +401,29 @@ func (b *Builder) startBuild(def *Definition, wrInput, s3Path, singDef, singDefP
}

func (b *Builder) asyncBuild(def *Definition, wrInput, s3Path, singDef string) error {
err := b.runner.Run(wrInput)
status := b.buildStatus(def)

jobID, err := b.runner.Add(wrInput)
if err != nil {
return err
}

err = b.runner.WaitForRunning(jobID)
if err != nil {
return err
}

b.statusMu.Lock()
buildStart := time.Now()
status.BuildStart = &buildStart
b.statusMu.Unlock()

_, err = b.runner.Wait(jobID)

b.statusMu.Lock()
buildDone := time.Now()
status.BuildDone = &buildDone
b.statusMu.Unlock()

b.postBuildMu.RLock()
if b.postBuild != nil {
Expand Down Expand Up @@ -371,22 +454,22 @@ func (b *Builder) asyncBuild(def *Definition, wrInput, s3Path, singDef string) e
}

// addLogToRepo fetches the build log from S3 and sends it to the core
// artifact repo for the given environment. Failures are logged rather than
// returned, since this is a best-effort operation.
func (b *Builder) addLogToRepo(s3Path, environmentPath string) {
	buildLog, err := b.s3.OpenFile(filepath.Join(s3Path, core.BuilderOut))
	if err != nil {
		slog.Error("error getting build log file", "err", err)

		return
	}

	artifacts := map[string]io.Reader{core.BuilderOut: buildLog}

	if err := b.addArtifactsToRepo(artifacts, environmentPath); err != nil {
		slog.Error("error sending build log file to core", "err", err)
	}
}

func (b *Builder) getExes(s3Path string) ([]string, error) {
exeData, err := b.s3.OpenFile(filepath.Join(s3Path, ExesBasename))
exeData, err := b.s3.OpenFile(filepath.Join(s3Path, core.ExesBasename))
if err != nil {
return nil, err
}
Expand All @@ -401,7 +484,7 @@ func (b *Builder) getExes(s3Path string) ([]string, error) {

func (b *Builder) prepareAndInstallArtifacts(def *Definition, s3Path,
moduleFileData string, exes []string) error {
imageData, err := b.s3.OpenFile(filepath.Join(s3Path, ImageBasename))
imageData, err := b.s3.OpenFile(filepath.Join(s3Path, core.ImageBasename))
if err != nil {
return err
}
Expand Down Expand Up @@ -431,24 +514,24 @@ func (b *Builder) prepareArtifactsFromS3AndSendToCoreAndS3(def *Definition, s3Pa

return b.addArtifactsToRepo(
map[string]io.Reader{
SpackLockFile: bytes.NewReader(lockData),
SoftpackYaml: strings.NewReader(concreteSpackYAMLFile),
SingularityDefBasename: strings.NewReader(singDef),
BuilderOut: logData,
moduleForCoreBasename: strings.NewReader(moduleFileData),
UsageBasename: strings.NewReader(readme),
core.SpackLockFile: bytes.NewReader(lockData),
core.SoftpackYaml: strings.NewReader(concreteSpackYAMLFile),
core.SingularityDefBasename: strings.NewReader(singDef),
core.BuilderOut: logData,
core.ModuleForCoreBasename: strings.NewReader(moduleFileData),
core.UsageBasename: strings.NewReader(readme),
},
def.FullEnvironmentPath(),
)
}

func (b *Builder) getArtifactDataFromS3(s3Path string) (io.Reader, []byte, error) {
logData, err := b.s3.OpenFile(filepath.Join(s3Path, BuilderOut))
logData, err := b.s3.OpenFile(filepath.Join(s3Path, core.BuilderOut))
if err != nil {
return nil, nil, err
}

lockFile, err := b.s3.OpenFile(filepath.Join(s3Path, SpackLockFile))
lockFile, err := b.s3.OpenFile(filepath.Join(s3Path, core.SpackLockFile))
if err != nil {
return nil, nil, err
}
Expand All @@ -469,7 +552,7 @@ func (b *Builder) generateAndUploadSpackYAML(lockData []byte, description string
}

if err = b.s3.UploadData(strings.NewReader(concreteSpackYAMLFile),
filepath.Join(s3Path, SoftpackYaml)); err != nil {
filepath.Join(s3Path, core.SoftpackYaml)); err != nil {
return "", err
}

Expand Down Expand Up @@ -543,7 +626,7 @@ func SpackLockToSoftPackYML(spackLockData []byte, desc string, exes []string) (s
func (b *Builder) generateAndUploadUsageFile(def *Definition, s3Path string) (string, error) {
readme := def.ModuleUsage(b.config.Module.LoadPath)

if err := b.s3.UploadData(strings.NewReader(readme), filepath.Join(s3Path, UsageBasename)); err != nil {
if err := b.s3.UploadData(strings.NewReader(readme), filepath.Join(s3Path, core.UsageBasename)); err != nil {
return "", err
}

Expand Down Expand Up @@ -585,7 +668,7 @@ func (b *Builder) addArtifactsToRepo(artifacts map[string]io.Reader, envPath str

io.Copy(&sb, resp.Body) //nolint:errcheck

return Error(sb.String())
return internal.Error(sb.String())
}

return <-errCh
Expand Down
Loading
Loading