From 0f9d3dafd40629cf7c3aa3db55792429eeaa4159 Mon Sep 17 00:00:00 2001 From: Container Builder Team Date: Tue, 17 Jul 2018 11:31:05 -0400 Subject: [PATCH 1/5] Project import generated by Copybara. PiperOrigin-RevId: 204915387 --- build/build.go | 50 ++++----------------------------------- build/build_test.go | 49 +++++++++++++++++--------------------- common/common.go | 21 ++++++++++++++++ localbuilder_main.go | 14 ++++++----- validate/validate.go | 22 +++++++++-------- validate/validate_test.go | 27 +++++++++++++++++++-- 6 files changed, 92 insertions(+), 91 deletions(-) diff --git a/build/build.go b/build/build.go index b088223b..a0428c9f 100644 --- a/build/build.go +++ b/build/build.go @@ -83,13 +83,6 @@ type Logger interface { MakeWriter(prefix string, stepIdx int, stdout bool) io.Writer } -// EventLogger encapsulates logging events about build steps -// starting/finishing. -type EventLogger interface { - StartStep(ctx context.Context, stepIdx int, startTime time.Time) error - FinishStep(ctx context.Context, stepIdx int, success bool, endTime time.Time) error -} - type imageDigest struct { tag, digest string } @@ -102,7 +95,6 @@ type Build struct { HasMultipleSteps bool TokenSource oauth2.TokenSource Log Logger - EventLogger EventLogger status BuildStatus imageDigests []imageDigest // docker image tag to digest (for built images) stepDigests []string // build step index to digest (for build steps) @@ -183,14 +175,13 @@ type kms interface { // New constructs a new Build. func New(r runner.Runner, b pb.Build, ts oauth2.TokenSource, - bl Logger, eventLogger EventLogger, hostWorkspaceDir string, fs afero.Fs, local, push, dryrun bool) *Build { + bl Logger, hostWorkspaceDir string, fs afero.Fs, local, push, dryrun bool) *Build { return &Build{ Runner: r, Request: b, TokenSource: ts, stepDigests: make([]string, len(b.Steps)), Log: bl, - EventLogger: eventLogger, Done: make(chan struct{}), times: map[BuildStatus]time.Duration{}, lastStateStart: timeNow(), @@ -833,7 +824,7 @@ func (b *Build) getKMSClient() (kms, error) { // when spoofing metadata works by IP. Until then, we'll just fetch the token // and pass it to all HTTP requests. svc, err := cloudkms.New(&http.Client{ - Transport: &tokenTransport{b.TokenSource}, + Transport: &common.TokenTransport{b.TokenSource}, }) if err != nil { return nil, err @@ -883,11 +874,6 @@ func (b *Build) timeAndRunStep(ctx context.Context, idx int, waitChans []chan st b.Timing.BuildSteps[idx] = &TimeSpan{Start: when} b.mu.Unlock() - if err := b.EventLogger.StartStep(ctx, idx, when); err != nil { - log.Printf("Error publishing start-step event: %v", err) - - } - err := b.runStep(ctx, idx) when = timeNow() @@ -905,13 +891,6 @@ func (b *Build) timeAndRunStep(ctx context.Context, idx int, waitChans []chan st } b.mu.Unlock() - // We use a background context to send the FinishStep message because ctx may - // have been timed out or cancelled. - if err := b.EventLogger.FinishStep(context.Background(), idx, err == nil, when); err != nil { - log.Printf("Error publishing finish-step event: %v", err) - - } - // If another step executing in parallel fails and sends an error, this step // will be blocked from sending an error on the channel. // Listen for context cancellation so that the goroutine exits. 
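The hunk above ends with a comment describing how a step goroutine avoids blocking when another step running in parallel has already failed. As an illustration only — not part of the patch; the channel name `errs` and the helper itself are assumptions — the send-or-cancel pattern that comment describes looks roughly like this in Go:

```go
package build

import "context"

// reportOrExit is a minimal sketch of the pattern described in the comment
// above: the step goroutine tries to deliver its error on a shared channel,
// but also watches for context cancellation so it exits instead of blocking
// forever once a parallel step has failed and no receiver remains.
// The channel name errs and this helper are illustrative, not code from build.go.
func reportOrExit(ctx context.Context, errs chan<- error, err error) {
	select {
	case errs <- err: // delivered to the goroutine coordinating the build steps
	case <-ctx.Done(): // build cancelled or timed out; give up on reporting
	}
}
```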
@@ -1040,11 +1019,8 @@ func (b *Build) runStep(ctx context.Context, idx int) error { } } - if err := b.Runner.Run(ctx, args, nil, outWriter, errWriter, ""); err != nil { - return err - } - - return nil + buildErr := b.Runner.Run(ctx, args, nil, outWriter, errWriter, "") + return buildErr } @@ -1380,21 +1356,3 @@ func (b *Build) pushArtifacts(ctx context.Context) error { return nil } - -// tokenTransport is a RoundTripper that automatically applies OAuth -// credentials from the token source. -// -// This can be replaced by google.DefaultClient when metadata spoofing works by -// IP address (b/33233310). -type tokenTransport struct { - ts oauth2.TokenSource -} - -func (t *tokenTransport) RoundTrip(req *http.Request) (*http.Response, error) { - tok, err := t.ts.Token() - if err != nil { - return nil, err - } - req.Header.Set("Authorization", "Bearer "+tok.AccessToken) - return http.DefaultTransport.RoundTrip(req) -} diff --git a/build/build_test.go b/build/build_test.go index 16c10f56..4c40230c 100644 --- a/build/build_test.go +++ b/build/build_test.go @@ -477,7 +477,7 @@ func TestFetchBuilder(t *testing.T) { for _, tc := range testCases { r := newMockRunner(t, tc.name) r.remotePullsFail = tc.pullsFail - b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, false, false) + b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false) var gotErr error var gotDigest string wantDigest := "" @@ -785,7 +785,7 @@ func TestRunBuildSteps(t *testing.T) { r.localImages["gcr.io/build-output-tag-2"] = true return tc.opError } - b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, false, false) + b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false) gotErr := b.runBuildSteps(ctx) if !reflect.DeepEqual(gotErr, tc.wantErr) { t.Errorf("%s: Wanted error %q, but got %q", tc.name, tc.wantErr, gotErr) @@ -1045,7 +1045,7 @@ func TestBuildStepOrder(t *testing.T) { } return nil } - b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, false, false) + b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false) errorFromFunction := make(chan error) go func() { errorFromFunction <- b.runBuildSteps(ctx) @@ -1096,7 +1096,7 @@ func TestPushImages(t *testing.T) { }} for _, tc := range testCases { r := newMockRunner(t, tc.name) - b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, true, false) + b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, true, false) r.remotePushesFail = tc.remotePushesFail gotErr := b.pushImages(ctx) if !reflect.DeepEqual(gotErr, tc.wantErr) { @@ -1235,7 +1235,7 @@ func TestPushArtifacts(t *testing.T) { t.Run(tc.name, func(t *testing.T) { r := newMockRunner(t, tc.name) - b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), false, true, false) + b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), false, true, false) b.gsutilHelper = tc.gsutilHelper err := b.pushArtifacts(ctx) @@ -1292,7 +1292,7 @@ func TestPushArtifactsTiming(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { r := newMockRunner(t, tc.name) - b := New(r, 
tc.buildRequest, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, true, false) + b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, true, false) b.gsutilHelper = newMockGsutilHelper("gs://some-bucket", 0) if err := b.pushArtifacts(ctx); err != nil { t.Fatalf("b.pushArtifacts() = %v", err) @@ -1347,7 +1347,7 @@ func TestPushAllTiming(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { r := newMockRunner(t, tc.name) - b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, true, false) + b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, true, false) b.gsutilHelper = newMockGsutilHelper("gs://some-bucket", 1) if err := b.pushAll(ctx); err != nil { t.Fatal(err) @@ -1480,7 +1480,7 @@ func TestBuildTiming(t *testing.T) { r.mu.Unlock() return nil } - b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, false, false) + b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false) b.runBuildSteps(ctx) buildStepTimes := b.Timing.BuildSteps @@ -1630,7 +1630,7 @@ func TestBuildTimingOutOfOrder(t *testing.T) { t.Run(tc.name, func(t *testing.T) { r := newMockRunner(t, tc.name) r.dockerRunHandler = func([]string, io.Writer, io.Writer) error { return nil } - b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, false, false) + b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false) b.runBuildSteps(ctx) buildStepTimes := b.Timing.BuildSteps @@ -1745,7 +1745,7 @@ func TestPushImagesTiming(t *testing.T) { for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { r := newMockRunner(t, tc.name) - b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, true, false) + b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, true, false) // We record pushStart for the test case where tc.wantFirstPushTimeOnly // is true. Since fakeTimeNow increments the second counter after each @@ -1819,7 +1819,7 @@ func TestBuildStepConcurrency(t *testing.T) { } // Run the build. - b := New(r, req, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, false, false) + b := New(r, req, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false) ret := make(chan error) go func() { ret <- b.runBuildSteps(ctx) @@ -1858,7 +1858,7 @@ type fakeRunner struct { func (f *fakeRunner) Run(ctx context.Context, args []string, _ io.Reader, _, _ io.Writer, _ string) error { // The "+1" is for the name of the container which is appended to the // dockerRunArgs base command. 
- b := New(nil, pb.Build{}, nil, nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, false, false) + b := New(nil, pb.Build{}, nil, nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false) argCount := len(b.dockerRunArgs("", "", 0)) + 1 switch { case !startsWith(args, "docker", "run"): @@ -1909,7 +1909,7 @@ func dockerRunString(idx int) string { } func dockerRunInStepDir(idx int, stepDir string) string { - b := New(nil, pb.Build{}, nil, nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, false, false) + b := New(nil, pb.Build{}, nil, nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false) dockerRunArg := b.dockerRunArgs(stepDir, fmt.Sprintf("/tmp/step-%d/", idx), idx) return strings.Join(dockerRunArg, " ") } @@ -1927,7 +1927,7 @@ func fakeTimeSpan() *TimeSpan { } func TestSummary(t *testing.T) { - b := New(nil, pb.Build{}, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, true, false) + b := New(nil, pb.Build{}, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, true, false) wantBuildStatus := StatusDone wantStepStatus := []pb.Build_Status{pb.Build_SUCCESS, pb.Build_SUCCESS} @@ -2005,7 +2005,7 @@ func TestErrorCollection(t *testing.T) { "got an http status: 300: it's a mystery to me", "got another http status: 300: it's a double mystery", } - b := New(nil, pb.Build{}, nil, nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, false, false) + b := New(nil, pb.Build{}, nil, nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false) for _, o := range outputs { b.detectPushFailure(o) } @@ -2074,7 +2074,7 @@ func TestEntrypoint(t *testing.T) { stepArgs <- strings.Join(args, " ") return nil } - b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, false, false) + b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false) errorFromFunction := make(chan error) go func() { errorFromFunction <- b.runBuildSteps(ctx) @@ -2166,7 +2166,7 @@ func TestSecrets(t *testing.T) { gotCommand = strings.Join(args, " ") return nil } - b := New(r, buildRequest, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, false, false) + b := New(r, buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false) b.kms = fakeKMS{ plaintext: c.plaintext, err: c.kmsErr, @@ -2353,7 +2353,7 @@ func TestTimeout(t *testing.T) { } r := newMockRunner(t, "timeout") r.dockerRunHandler = func([]string, io.Writer, io.Writer) error { return nil } - b := New(r, request, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, true, false) + b := New(r, request, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, true, false) b.Start(ctx) <-b.Done @@ -2379,7 +2379,7 @@ func TestCancel(t *testing.T) { } r := newMockRunner(t, "cancel") r.dockerRunHandler = func([]string, io.Writer, io.Writer) error { return nil } - b := New(r, request, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, true, false) + b := New(r, request, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, true, false) fullStatus := b.GetStatus() if fullStatus.BuildStatus != "" { t.Errorf("Build has status before Start(): %v", fullStatus) @@ -2473,7 +2473,7 @@ func TestStart(t *testing.T) { r.localImages["gcr.io/build"] = true return nil } - b := New(r, tc.buildRequest, mockTokenSource(), 
nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, tc.push, false) + b := New(r, tc.buildRequest, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, tc.push, false) b.Start(ctx) <-b.Done @@ -2496,7 +2496,7 @@ func TestUpdateDockerAccessToken(t *testing.T) { ctx := context.Background() r := newMockRunner(t, "TestUpdateDockerAccessToken") r.dockerRunHandler = func(args []string, _, _ io.Writer) error { return nil } - b := New(r, pb.Build{}, nil, nil, nil, "", afero.NewMemMapFs(), false, false, false) + b := New(r, pb.Build{}, nil, nil, "", afero.NewMemMapFs(), false, false, false) // If UpdateDockerAccessToken is called before SetDockerAccessToken, we // should get an error. @@ -2629,11 +2629,6 @@ func (nopBuildLogger) WriteMainEntry(string) { return } func (nopBuildLogger) Close() error { return nil } func (nopBuildLogger) MakeWriter(string, int, bool) io.Writer { return ioutil.Discard } -type nopEventLogger struct{} - -func (nopEventLogger) StartStep(context.Context, int, time.Time) error { return nil } -func (nopEventLogger) FinishStep(context.Context, int, bool, time.Time) error { return nil } - // Test coverage to confirm that we don't have the data race fixed in cl/190142977. func TestDataRace(t *testing.T) { build := pb.Build{ @@ -2642,7 +2637,7 @@ func TestDataRace(t *testing.T) { } r := newMockRunner(t, "race") r.dockerRunHandler = func([]string, io.Writer, io.Writer) error { return nil } - b := New(r, build, mockTokenSource(), nopBuildLogger{}, nopEventLogger{}, "", afero.NewMemMapFs(), true, false, false) + b := New(r, build, mockTokenSource(), nopBuildLogger{}, "", afero.NewMemMapFs(), true, false, false) ctx := context.Background() go b.Start(ctx) // updates b.Timing.BuildTotal diff --git a/common/common.go b/common/common.go index 87ee386c..a8586599 100644 --- a/common/common.go +++ b/common/common.go @@ -21,6 +21,7 @@ import ( "fmt" "log" "math/rand" + "net/http" "os" "strings" "time" @@ -29,6 +30,7 @@ import ( "github.com/GoogleCloudPlatform/container-builder-local/runner" "github.com/GoogleCloudPlatform/container-builder-local/subst" "github.com/GoogleCloudPlatform/container-builder-local/validate" + "golang.org/x/oauth2" ) const ( @@ -170,3 +172,22 @@ func SubstituteAndValidate(b *pb.Build, substMap map[string]string) error { return nil } + +// TokenTransport is a RoundTripper that automatically applies OAuth +// credentials from the token source. +// +// This can be replaced by google.DefaultClient when metadata spoofing works by +// IP address (b/33233310). +type TokenTransport struct { + Ts oauth2.TokenSource +} + +// RoundTrip executes a single HTTP transaction, obtaining the Response for a given Request. +func (t *TokenTransport) RoundTrip(req *http.Request) (*http.Response, error) { + tok, err := t.Ts.Token() + if err != nil { + return nil, err + } + req.Header.Set("Authorization", "Bearer "+tok.AccessToken) + return http.DefaultTransport.RoundTrip(req) +} diff --git a/localbuilder_main.go b/localbuilder_main.go index 2ec4678e..f3b6dd4d 100644 --- a/localbuilder_main.go +++ b/localbuilder_main.go @@ -64,6 +64,13 @@ func exitUsage(msg string) { } func main() { + // BEGIN GOOGLE-INTERNAL + // This never needs to be mirrored out to GitHub. On GitHub, we're going to + // rename the repo and change the name of the built binary. + if strings.Contains(os.Args[0], "container-builder") { + log.Printf("WARNING: %v is deprecated. 
Please run `gcloud install cloud-build-local` to install its replacement.", os.Args[0]) + } + // END GOOGLE-INTERNAL flag.Parse() ctx := context.Background() args := flag.Args() @@ -191,7 +198,7 @@ func run(ctx context.Context, source string) error { } } - b := build.New(r, *buildConfig, nil /* TokenSource */, stdoutLogger{}, nopEventLogger{}, volumeName, afero.NewOsFs(), true, *push, *dryRun) + b := build.New(r, *buildConfig, nil /* TokenSource */, stdoutLogger{}, volumeName, afero.NewOsFs(), true, *push, *dryRun) // Do not run the spoofed metadata server on a dryrun. if !*dryRun { @@ -361,8 +368,3 @@ func (pw prefixWriter) Write(b []byte) (int, error) { } return len(b), nil } - -type nopEventLogger struct{} - -func (nopEventLogger) StartStep(context.Context, int, time.Time) error { return nil } -func (nopEventLogger) FinishStep(context.Context, int, bool, time.Time) error { return nil } diff --git a/validate/validate.go b/validate/validate.go index a3ad2eab..0a7f0b52 100644 --- a/validate/validate.go +++ b/validate/validate.go @@ -51,12 +51,9 @@ const ( maxSubstKeyLength = 100 // max length of a substitution key. maxSubstValueLength = 4000 // max length of a substitution value. maxNumSecretEnvs = 100 // max number of unique secret env values. + maxSecretSize = 2048 // max size of a secret maxArtifactsPaths = 100 // max number of artifacts paths that can be specified. - - // Name of the permission required to use a key to decrypt data. - // Documented at https://cloud.google.com/kms/docs/reference/permissions-and-roles - cloudkmsDecryptPermission = "cloudkms.cryptoKeyVersions.useToDecrypt" - maxNumTags = 64 // max length of the list of tags. + maxNumTags = 64 // max length of the list of tags. ) var ( @@ -81,7 +78,6 @@ var ( "/builder/home": struct{}{}, "/var/run/docker.sock": struct{}{}, } - validTagRE = regexp.MustCompile(`^(` + reference.TagRegexp.String() + `)$`) // validImageTagRE ensures only proper characters are used in name and tag. validImageTagRE = regexp.MustCompile(`^(` + reference.NameRegexp.String() + `(@sha256:` + reference.TagRegexp.String() + `|:` + reference.TagRegexp.String() + `)?)$`) // validGCRImageRE ensures proper domain and folder level image for gcr.io. More lenient on the actual characters other than folder structure and domain. @@ -518,7 +514,6 @@ func checkSecrets(b *pb.Build) error { definedSecretEnvs[k] = struct{}{} } } - // Check that all used secret_envs are defined. for used := range usedSecretEnvs { if _, found := definedSecretEnvs[used]; !found { @@ -538,8 +533,8 @@ func checkSecrets(b *pb.Build) error { // Check secret_env max size. for _, sec := range b.Secrets { for k, v := range sec.SecretEnv { - if len(v) > 1024 { - return fmt.Errorf("secretEnv value for %q cannot exceed 1KB", k) + if len(v) > maxSecretSize { + return fmt.Errorf("secretEnv value for %q cannot exceed %dB", k, maxSecretSize) } } } @@ -589,7 +584,14 @@ func checkBuildStepNames(steps []*pb.BuildStep) error { func checkImageNames(images []string) error { for _, image := range images { if !validImageTagRE.MatchString(image) { - return fmt.Errorf("invalid image %q", image) + // If the lowercased string matches the validImageTag regex, then uppercase letters are invalidating the string. + // Return an informative error message to the user. + // Ideally, we could just print out the desired regex or refer to Docker documentation, + // but validImageTagRE is terribly long, and there is no Docker documentation to point to. 
+ if validImageTagRE.MatchString(strings.ToLower(image)) { + return fmt.Errorf("invalid image name %q contains uppercase letters", image) + } + return fmt.Errorf("invalid image name %q", image) } } return nil diff --git a/validate/validate_test.go b/validate/validate_test.go index cd6ddf00..c98ad1a8 100644 --- a/validate/validate_test.go +++ b/validate/validate_test.go @@ -999,11 +999,11 @@ func TestCheckSecrets(t *testing.T) { Secrets: []*pb.Secret{{ KmsKeyName: kmsKeyName, SecretEnv: map[string][]byte{ - "MY_SECRET": []byte(strings.Repeat("a", 2000)), + "MY_SECRET": []byte(strings.Repeat("a", maxSecretSize+1)), }, }}, }, - wantErr: errors.New(`secretEnv value for "MY_SECRET" cannot exceed 1KB`), + wantErr: errors.New(`secretEnv value for "MY_SECRET" cannot exceed 2048B`), }, { desc: "Happy case: Build with acceptable secret values", b: &pb.Build{ @@ -1122,6 +1122,7 @@ var validNames = []string{ "subdomain.gcr.io/works/folder/folder", "gcr.io/works:tag", "gcr.io/works/folder:tag", + "gcr.io/works/folder:Tag", "ubuntu", "ubuntu:latest", "gcr.io/cloud-builders/docker@sha256:blah", @@ -1165,6 +1166,28 @@ func TestCheckImageNames(t *testing.T) { } } +// TestCheckImageNames ensures that there are different error messages +// for invalid images with uppercase letters, and all other invalid images. +func TestCheckImageNamesErrorMessage(t *testing.T) { + invalidImages := []string{ + "UBUNTU:latest", // contains uppercase + "ubnutu::latest", // invalid, but all lowercase + } + wantErr := []error{ + errors.New(`invalid image name "UBUNTU:latest" contains uppercase letters`), + errors.New(`invalid image name "ubnutu::latest"`), + } + for i, img := range invalidImages { + err := checkImageNames([]string{img}) + if err == nil { + t.Fatalf("checkImageNames(%v) did not return error", img) + } + if err.Error() != wantErr[i].Error() { + t.Errorf("got error = %q, want %q", err.Error(), wantErr[i].Error()) + } + } +} + func TestCheckBuildTags(t *testing.T) { var hugeTagList []string for i := 0; i < maxNumTags+1; i++ { From 43d1c33b6e915f00846099f6aa7e1e9ebd3ea053 Mon Sep 17 00:00:00 2001 From: Cloud Build Team Date: Tue, 24 Jul 2018 09:55:02 -0400 Subject: [PATCH 2/5] Project import generated by Copybara. PiperOrigin-RevId: 205821678 --- CONTRIBUTING.md | 27 ++-- README.md | 56 +++---- build/build.go | 53 ++++--- build/build_test.go | 63 ++++++-- cloudbuild_tag.yaml | 4 +- common/common.go | 6 +- gcloud/gcloud.go | 4 +- gsutil/gsutil.go | 139 +++++------------ gsutil/gsutil_test.go | 196 +++++------------------- integration_tests/gce_startup_script.sh | 4 +- integration_tests/test_script.sh | 46 +++--- localbuilder_main.go | 20 +-- logger/logger.go | 27 ++++ metadata/metadata.go | 12 +- validate/validate.go | 12 +- validate/validate_test.go | 9 ++ volume/volume.go | 2 +- 17 files changed, 293 insertions(+), 387 deletions(-) create mode 100644 logger/logger.go diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 755ba80c..2e72e0ed 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -1,5 +1,4 @@ -Contributor License Agreements ------------------------------- +## Contributor License Agreements **This project is not yet set up to accept external contributions.** @@ -8,16 +7,16 @@ Contributor License Agreements Before we can accept your pull requests you'll need to sign a Contributor License Agreement (CLA): -* If you are an individual writing original source code and you own the - intellectual property, then you'll need to sign an - [individual CLA](https://developers.google.com/open-source/cla/individual). 
+* If you are an individual writing original source code and you own the + intellectual property, then you'll need to sign an + [individual CLA](https://developers.google.com/open-source/cla/individual). -* If you work for a company that wants to allow you to contribute your work, - then you'll need to sign a - [corporate CLA](https://developers.google.com/open-source/cla/corporate>). +* If you work for a company that wants to allow you to contribute your work, + then you'll need to sign a + [corporate CLA](https://developers.google.com/open-source/cla/corporate>). -You can sign these electronically (just scroll to the bottom). After that, -we'll be able to accept your pull requests. +You can sign these electronically (just scroll to the bottom). After that, we'll +be able to accept your pull requests. ## Developing the Local Builder @@ -29,18 +28,18 @@ To build and test the Local Builder, you need a working Run the following commands to install the Local Builder tool: ``` -go get github.com/GoogleCloudPlatform/container-builder-local -go install github.com/GoogleCloudPlatform/container-builder-local +go get github.com/GoogleCloudPlatform/cloud-build-local +go install github.com/GoogleCloudPlatform/cloud-build-local ``` To run a build: ``` -./bin/container-builder-local --dryrun=false --config=path/to/cloudbuild.yaml path/to/code +./bin/cloud-build-local --dryrun=false --config=path/to/cloudbuild.yaml path/to/code ``` To run the tests for Local Builder (without the vendored libraries): ``` -go test $(go list github.com/GoogleCloudPlatform/container-builder-local/... | grep -v vendor) +go test $(go list github.com/GoogleCloudPlatform/cloud-build-local/... | grep -v vendor) ``` diff --git a/README.md b/README.md index fcac93ef..fb51f385 100644 --- a/README.md +++ b/README.md @@ -1,26 +1,27 @@ -# Google Container Builder Local Builder +# Google Cloud Build Local Builder -**Local Builder** runs [Google Container Builder] locally, -allowing easier debugging, execution of builds on your own hardware, -and integration into local build and test workflows. +**Local Builder** runs [Google Cloud Build] locally, allowing easier debugging, +execution of builds on your own hardware, and integration into local build and +test workflows. ----- +-------------------------------------------------------------------------------- ## Prerequisites 1. Ensure you have installed: - * [gcloud](https://cloud.google.com/sdk/docs/quickstarts) - * [Docker](https://www.docker.com/) - * [Go](https://golang.org/doc/install) (if you want to compile Local - Builder from source) + + * [gcloud](https://cloud.google.com/sdk/docs/quickstarts) + * [Docker](https://www.docker.com/) + * [Go](https://golang.org/doc/install) (if you want to compile Local + Builder from source) 2. If the build needs to access a private Google Container Registry, install and configure the [Docker credential helper](https://github.com/GoogleCloudPlatform/docker-credential-gcr) for Google Container Registry. -3. Configure your project for the gcloud tool, where `[PROJECT_ID]` is - your Cloud Platform project ID: +3. Configure your project for the gcloud tool, where `[PROJECT_ID]` is your + Cloud Platform project ID: ``` gcloud config set project [PROJECT-ID] @@ -31,49 +32,50 @@ and integration into local build and test workflows. 1. 
Install by running the following command: ``` - gcloud components install container-builder-local + gcloud components install cloud-build-local ``` - After successful installation, you will have `container-builder-local` in - your PATH as part of the Google Cloud SDK binaries. + After successful installation, you will have `cloud-build-local` in your + PATH as part of the Google Cloud SDK binaries. 2. To see all of the commands, run: ``` - $ container-builder-local --help + $ cloud-build-local --help ``` - The Local Builder's command is `$ container-builder-local`. - + The Local Builder's command is `$ cloud-build-local`. ## Download the latest binaries The latest binaries are available in a GCS bucket. -[Download](https://storage.googleapis.com/container-builder-local/container-builder-local_latest.tar.gz) the latest binaries from GCS. +[Download](https://storage.googleapis.com/cloud-build-local/cloud-build-local_latest.tar.gz) +the latest binaries from GCS. To run a build: ``` -./container-builder-local_{linux,darwin}_{386,amd64}-v --dryrun=false --config=path/to/cloudbuild.yaml path/to/code +./cloud-build-local_{linux,darwin}_{386,amd64}-v --dryrun=false --config=path/to/cloudbuild.yaml path/to/code ``` ## Developing and contributing to the Local Builder -See the [contributing instructions](https://github.com/GoogleCloudPlatform/container-builder-local/blob/master/CONTRIBUTING.md). +See the +[contributing instructions](https://github.com/GoogleCloudPlatform/cloud-build-local/blob/master/CONTRIBUTING.md). ## Limitations -* Only one build can be run at a time on a given host. -* The tool works on the following platforms: - * Linux - * macOS +* Only one build can be run at a time on a given host. +* The tool works on the following platforms: + * Linux + * macOS ## Support File issues here on gitHub, email `gcr-contact@google.com`, or join our -[Slack channel] if you have general questions about Local Builder or -Container Builder. +[Slack channel] if you have general questions about Local Builder or Container +Builder. -[Google Container Builder]: http://cloud.google.com/container-builder/ +[Google Cloud Build]: http://cloud.google.com/cloud-build/ [Slack channel]: https://googlecloud-community.slack.com/messages/C4KCRJL4D/details/ diff --git a/build/build.go b/build/build.go index a0428c9f..df29cedd 100644 --- a/build/build.go +++ b/build/build.go @@ -36,10 +36,11 @@ import ( pb "google.golang.org/genproto/googleapis/devtools/cloudbuild/v1" "github.com/golang/protobuf/ptypes" - "github.com/GoogleCloudPlatform/container-builder-local/common" - "github.com/GoogleCloudPlatform/container-builder-local/gsutil" - "github.com/GoogleCloudPlatform/container-builder-local/runner" - "github.com/GoogleCloudPlatform/container-builder-local/volume" + "github.com/GoogleCloudPlatform/cloud-build-local/common" + "github.com/GoogleCloudPlatform/cloud-build-local/gsutil" + "github.com/GoogleCloudPlatform/cloud-build-local/logger" + "github.com/GoogleCloudPlatform/cloud-build-local/runner" + "github.com/GoogleCloudPlatform/cloud-build-local/volume" "github.com/spf13/afero" "google.golang.org/api/cloudkms/v1" "golang.org/x/oauth2" @@ -76,13 +77,6 @@ var ( timeNow = time.Now ) -// Logger encapsulates logging build output. 
-type Logger interface { - WriteMainEntry(msg string) - Close() error - MakeWriter(prefix string, stepIdx int, stdout bool) io.Writer -} - type imageDigest struct { tag, digest string } @@ -94,7 +88,7 @@ type Build struct { Request pb.Build HasMultipleSteps bool TokenSource oauth2.TokenSource - Log Logger + Log logger.Logger status BuildStatus imageDigests []imageDigest // docker image tag to digest (for built images) stepDigests []string // build step index to digest (for build steps) @@ -175,7 +169,7 @@ type kms interface { // New constructs a new Build. func New(r runner.Runner, b pb.Build, ts oauth2.TokenSource, - bl Logger, hostWorkspaceDir string, fs afero.Fs, local, push, dryrun bool) *Build { + bl logger.Logger, hostWorkspaceDir string, fs afero.Fs, local, push, dryrun bool) *Build { return &Build{ Runner: r, Request: b, @@ -190,7 +184,7 @@ func New(r runner.Runner, b pb.Build, ts oauth2.TokenSource, local: local, push: push, dryrun: dryrun, - gsutilHelper: gsutil.New(r, fs), + gsutilHelper: gsutil.New(r, fs, bl), fs: fs, } } @@ -1294,6 +1288,12 @@ func (b *Build) pushImages(ctx context.Context) error { return nil } +// GCS URL to bucket +func extractGCSBucket(url string) string { + toks := strings.SplitN(strings.TrimPrefix(url, "gs://"), "/", 2) + return fmt.Sprintf("gs://%s", toks[0]) +} + var newUUID = uuid.New // pushArtifacts pushes ArtifactObjects to a specified bucket. @@ -1302,14 +1302,16 @@ func (b *Build) pushArtifacts(ctx context.Context) error { return nil } - // Check that the GCS bucket exists. + // Only verify that the GCS bucket exists. + // If they specify a directory path in the bucket that doesn't exist, gsutil will create it for them. - bucket := b.Request.Artifacts.Objects.Location + location := b.Request.Artifacts.Objects.Location + bucket := extractGCSBucket(location) if err := b.gsutilHelper.VerifyBucket(ctx, bucket); err != nil { return err } - // Upload specified artifacts from the workspace to the GCS bucket. + // Upload specified artifacts from the workspace to the GCS location. workdir := containerWorkspaceDir if dir := b.Request.GetSource().GetRepoSource().GetDir(); dir != "" { workdir = path.Join(workdir, dir) @@ -1324,30 +1326,31 @@ func (b *Build) pushArtifacts(ctx context.Context) error { b.Timing.ArtifactsPushes = &TimeSpan{Start: timeNow()} b.mu.Unlock() - b.Log.WriteMainEntry(fmt.Sprintf("Artifacts will be uploaded to %s", bucket)) + b.Log.WriteMainEntry(fmt.Sprintf("Artifacts will be uploaded to %s using gsutil cp", bucket)) results := []*pb.ArtifactResult{} for _, src := range b.Request.Artifacts.Objects.Paths { - b.Log.WriteMainEntry(fmt.Sprintf("%s: uploading matching files...", src)) - r, err := b.gsutilHelper.UploadArtifacts(ctx, flags, src, bucket) + b.Log.WriteMainEntry(fmt.Sprintf("%s: Uploading path....", src)) + r, err := b.gsutilHelper.UploadArtifacts(ctx, flags, src, location) if err != nil { - return fmt.Errorf("could not upload %s to %s; err = %v", src, bucket, err) + return fmt.Errorf("could not upload %s to %s; err = %v", src, location, err) } results = append(results, r...) b.Log.WriteMainEntry(fmt.Sprintf("%s: %d matching files uploaded", src, len(r))) } numArtifacts := int64(len(results)) - b.Log.WriteMainEntry(fmt.Sprintf("%d total artifacts uploaded to %s", numArtifacts, bucket)) + b.Log.WriteMainEntry(fmt.Sprintf("%d total artifacts uploaded to %s", numArtifacts, location)) b.mu.Lock() b.Timing.ArtifactsPushes.End = timeNow() b.mu.Unlock() - // Write a JSON manifest for the artifacts and upload to the GCS bucket. 
+ // Write a JSON manifest for the artifacts and upload to the GCS location. filename := fmt.Sprintf("artifacts-%s.json", b.Request.Id) - artifactManifest, err := b.gsutilHelper.UploadArtifactsManifest(ctx, flags, filename, bucket, results) + b.Log.WriteMainEntry(fmt.Sprintf("Uploading manifest %s", filename)) + artifactManifest, err := b.gsutilHelper.UploadArtifactsManifest(ctx, flags, filename, location, results) if err != nil { - return fmt.Errorf("could not upload %s to %s; err = %v", filename, bucket, err) + return fmt.Errorf("could not upload %s to %s; err = %v", filename, location, err) } b.Log.WriteMainEntry(fmt.Sprintf("Artifact manifest located at %s", artifactManifest)) diff --git a/build/build_test.go b/build/build_test.go index 4c40230c..3bd19f38 100644 --- a/build/build_test.go +++ b/build/build_test.go @@ -33,8 +33,8 @@ import ( "time" durpb "github.com/golang/protobuf/ptypes/duration" - "github.com/GoogleCloudPlatform/container-builder-local/gsutil" - "github.com/GoogleCloudPlatform/container-builder-local/runner" + "github.com/GoogleCloudPlatform/cloud-build-local/gsutil" + "github.com/GoogleCloudPlatform/cloud-build-local/runner" "github.com/spf13/afero" "golang.org/x/oauth2" "github.com/pborman/uuid" @@ -1139,13 +1139,15 @@ func TestPushImages(t *testing.T) { } type mockGsutilHelper struct { + location string bucket string numArtifacts int // define num of artifacts to upload } -func newMockGsutilHelper(bucket string, numArtifacts int) mockGsutilHelper { +func newMockGsutilHelper(location string, numArtifacts int) mockGsutilHelper { return mockGsutilHelper{ - bucket: strings.TrimSuffix(bucket, "/"), // remove any trailing slash for consistency + location: strings.TrimSuffix(location, "/"), // remove any trailing slash for consistency + bucket: extractGCSBucket(location), numArtifacts: numArtifacts, } } @@ -1163,7 +1165,7 @@ func (g mockGsutilHelper) UploadArtifacts(ctx context.Context, flags gsutil.Dock // NB: We do not care about the actual ArtifactResult contents for build_test, just the number produced. uuid := newUUID() // unique ArtifactResult artifacts = append(artifacts, &pb.ArtifactResult{ - Location: g.bucket + "/artifact-" + uuid, + Location: g.location + "/artifact-" + uuid, FileHash: []*pb.FileHashes{{ FileHash: []*pb.Hash{{Type: pb.Hash_MD5, Value: []byte("md5" + uuid)}}}, }, @@ -1178,7 +1180,34 @@ func (g mockGsutilHelper) UploadArtifactsManifest(ctx context.Context, flags gsu return strings.Join([]string{b, manifest}, "/"), nil } +func TestExtractGCSBucket(t *testing.T) { + testCases := []struct { + url, wantBucket string + }{{ + url: "gs://bucket", + wantBucket: "gs://bucket", + }, { + url: "gs://bucket/", + wantBucket: "gs://bucket", + }, { + url: "gs://bucket/some/path", + wantBucket: "gs://bucket", + }, { + url: "gs://bucket/some/path/", + wantBucket: "gs://bucket", + }, { + url: "", + wantBucket: "gs://", + }} + for _, tc := range testCases { + if gotBucket := extractGCSBucket(tc.url); gotBucket != tc.wantBucket { + t.Errorf("got %s, want %s", gotBucket, tc.wantBucket) + } + } +} + // TestPushArtifacts checks that after artifacts are uploaded, an ArtifactsInfo object is assigned to the build's artifacts field. 
+ func TestPushArtifacts(t *testing.T) { ctx := context.Background() newUUID = func() string { return "someuuid" } @@ -1193,7 +1222,7 @@ func TestPushArtifacts(t *testing.T) { wantArtifactsInfo ArtifactsInfo wantErr bool }{{ - name: "SuccessPushArtifacts", + name: "PushArtifacts", buildRequest: pb.Build{ Id: buildID, Artifacts: &pb.Artifacts{ @@ -1209,17 +1238,33 @@ func TestPushArtifacts(t *testing.T) { NumArtifacts: 1, }, }, { - name: "SuccessNoArtifactsField", + name: "NoArtifactsField", buildRequest: commonBuildRequest, gsutilHelper: newMockGsutilHelper("gs://some-bucket", 0), }, { - name: "SuccessNoObjectsField", + name: "NoObjectsField", buildRequest: pb.Build{ Artifacts: &pb.Artifacts{}, }, gsutilHelper: newMockGsutilHelper("gs://some-bucket", 0), }, { - name: "ErrorBucketDoesNotExist", + name: "BucketExistsNewPath", + buildRequest: pb.Build{ + Id: buildID, + Artifacts: &pb.Artifacts{ + Objects: &pb.Artifacts_ArtifactObjects{ + Location: "gs://some-bucket/longer/path/not/in/existence", + Paths: []string{"artifact.txt"}, + }, + }, + }, + gsutilHelper: newMockGsutilHelper("gs://some-bucket", 1), + wantArtifactsInfo: ArtifactsInfo{ + ArtifactManifest: "gs://some-bucket/longer/path/not/in/existence/artifacts-" + buildID + ".json", + NumArtifacts: 1, + }, + }, { + name: "ErrBucketDoesNotExist", buildRequest: pb.Build{ Artifacts: &pb.Artifacts{ Objects: &pb.Artifacts_ArtifactObjects{ diff --git a/cloudbuild_tag.yaml b/cloudbuild_tag.yaml index 607ed824..a35a07cf 100644 --- a/cloudbuild_tag.yaml +++ b/cloudbuild_tag.yaml @@ -23,8 +23,8 @@ steps: for GOOS in darwin linux; do for GOARCH in 386 amd64; do # Build binary with the new tag and with 'latest' - GOOS=$$GOOS GOARCH=$$GOARCH /builder/bin/go.bash build -o container-builder-local_$${GOOS}_$${GOARCH}-$TAG_NAME github.com/GoogleCloudPlatform/container-builder-local - GOOS=$$GOOS GOARCH=$$GOARCH /builder/bin/go.bash build -o cloud-build-local_$${GOOS}_$${GOARCH}-$TAG_NAME github.com/GoogleCloudPlatform/container-builder-local + GOOS=$$GOOS GOARCH=$$GOARCH /builder/bin/go.bash build -o container-builder-local_$${GOOS}_$${GOARCH}-$TAG_NAME github.com/GoogleCloudPlatform/cloud-build-local + GOOS=$$GOOS GOARCH=$$GOARCH /builder/bin/go.bash build -o cloud-build-local_$${GOOS}_$${GOARCH}-$TAG_NAME github.com/GoogleCloudPlatform/cloud-build-local done done tar -czvf container-builder-local_latest.tar.gz container-builder-local_* diff --git a/common/common.go b/common/common.go index a8586599..7f7c9c79 100644 --- a/common/common.go +++ b/common/common.go @@ -27,9 +27,9 @@ import ( "time" pb "google.golang.org/genproto/googleapis/devtools/cloudbuild/v1" - "github.com/GoogleCloudPlatform/container-builder-local/runner" - "github.com/GoogleCloudPlatform/container-builder-local/subst" - "github.com/GoogleCloudPlatform/container-builder-local/validate" + "github.com/GoogleCloudPlatform/cloud-build-local/runner" + "github.com/GoogleCloudPlatform/cloud-build-local/subst" + "github.com/GoogleCloudPlatform/cloud-build-local/validate" "golang.org/x/oauth2" ) diff --git a/gcloud/gcloud.go b/gcloud/gcloud.go index 90cc0566..f2e67ff2 100644 --- a/gcloud/gcloud.go +++ b/gcloud/gcloud.go @@ -26,8 +26,8 @@ import ( "strings" "time" - "github.com/GoogleCloudPlatform/container-builder-local/metadata" - "github.com/GoogleCloudPlatform/container-builder-local/runner" + "github.com/GoogleCloudPlatform/cloud-build-local/metadata" + "github.com/GoogleCloudPlatform/cloud-build-local/runner" ) var ( diff --git a/gsutil/gsutil.go b/gsutil/gsutil.go index 631cb7bb..1db22197 
100644 --- a/gsutil/gsutil.go +++ b/gsutil/gsutil.go @@ -31,7 +31,8 @@ import ( "strings" pb "google.golang.org/genproto/googleapis/devtools/cloudbuild/v1" - "github.com/GoogleCloudPlatform/container-builder-local/runner" + "github.com/GoogleCloudPlatform/cloud-build-local/logger" + "github.com/GoogleCloudPlatform/cloud-build-local/runner" "github.com/spf13/afero" "github.com/pborman/uuid" ) @@ -64,11 +65,16 @@ type Helper interface { type RealHelper struct { runner runner.Runner fs afero.Fs + logger logger.Logger } // New returns a new RealHelper struct. -func New(r runner.Runner, fs afero.Fs) RealHelper { - return RealHelper{runner: r, fs: fs} +func New(r runner.Runner, fs afero.Fs, logger logger.Logger) RealHelper { + return RealHelper{ + runner: r, + fs: fs, + logger: logger, + } } // VerifyBucket returns nil if the bucket exists, otherwise an error. @@ -108,6 +114,7 @@ func (g RealHelper) UploadArtifacts(ctx context.Context, flags DockerFlags, src, // Create a temp file for gsutil manifest. This manifest is used when making calls to "gsutil cp." // The user should not see the gsutil manifest after the upload. + f := fmt.Sprintf("manifest_%s.log", newUUID()) tmpfile, err := afero.TempFile(g.fs, flags.Tmpdir, f) @@ -117,28 +124,16 @@ func (g RealHelper) UploadArtifacts(ctx context.Context, flags DockerFlags, src, defer tmpfile.Close() gsutilManifest := tmpfile.Name() - // Find files in the workspace directory that match the glob string. - srcFiles, err := g.glob(ctx, flags, src) - if err != nil { - return nil, fmt.Errorf("error finding matching files for %q: err = %v", src, err) - } - if len(srcFiles) == 0 { - return nil, nil - } - - // Copy matching files to the GCS bucket. - - - - for _, src := range srcFiles { - if output, err := g.runGsutil(ctx, flags, "cp", "-L", gsutilManifest, src, adjustDest(src, dest)); err != nil { - log.Printf("gsutil could not copy artifact %q to %q:\n%s", src, dest, output) - return nil, err - } + // Copy matching files to the GCS bucket + tag := src // prefixed to logs + if output, err := g.runGsutil(ctx, tag, flags, "-m", "cp", "-L", gsutilManifest, src, dest); err != nil { + log.Printf("gsutil could not copy artifact %q to %q:\n%s", src, dest, output) + return nil, err } results, err := g.parseGsutilManifest(gsutilManifest) if err != nil { + log.Printf("gsutil could not parse manifest: %v", err) return nil, err } @@ -163,7 +158,7 @@ func (g RealHelper) UploadArtifactsManifest(ctx context.Context, flags DockerFla defer g.fs.Remove(manifestPath) // Upload manifest to the GCS bucket. - if output, err := g.runGsutil(ctx, flags, "cp", manifestPath, bucket); err != nil { + if output, err := g.runGsutil(ctx, "", flags, "cp", manifestPath, bucket); err != nil { log.Printf("gsutil could not copy artifact manifest %q to %q:\n%s", manifestPath, bucket, output) return "", err } @@ -193,53 +188,7 @@ func (g RealHelper) createArtifactsManifest(manifestPath string, results []*pb.A return nil } -// glob searches the workspace directory for source files that match the glob src string, and returns a string array of file paths. -func (g RealHelper) glob(ctx context.Context, flags DockerFlags, src string) ([]string, error) { - - if flags.Workvol == "" { - return nil, errors.New("flags.Workvol has no value") - } - if flags.Workdir == "" { - return nil, errors.New("flags.Workdir has no value") - } - - // Docker run args for gsutil. - args := []string{"docker", "run", - // Assign container name. 
- "--name", fmt.Sprintf("cloudbuild_gsutil_%s", newUUID()), - // Remove the container when it exits. - "--rm", - // Mount the project workspace, - "--volume", flags.Workvol, - // Run gsutil from the workspace dir. - "--workdir", flags.Workdir, - // Set bash entrypoint to enable multiple gsutil commands. - "--entrypoint", "bash"} - - args = append(args, "ubuntu") - args = append(args, "-c") - // Enable globbing and find files that match the glob string. This will include hidden files. - // We prefix source with "./" so that wildcarding works. Why does "./" make a difference? No idea. ¯\_(ツ)_/¯ - // Note that resulting filepath matches will also have "./" prefixed. - - args = append(args, fmt.Sprintf("shopt -s globstar; find ./%s -type f", src)) - - output, err := g.runAndScrape(ctx, args) - if err != nil { - if strings.Contains(output, errFileNotFound) { - // We can't just return the error because it will say 'exit status 1', which is uninformative. - return nil, fmt.Errorf("no files match glob string %q: %v", src, err) - } - // Note: this error will be uninformative and just say 'exit status 1'. - - return nil, err - } - - // Remove leading and trailing newlines so the split doesn't result in empty string elements. - return strings.Split(strings.Trim(output, "\n"), "\n"), nil -} - -func (g RealHelper) runGsutil(ctx context.Context, flags DockerFlags, cmd ...string) (string, error) { +func (g RealHelper) runGsutil(ctx context.Context, tag string, flags DockerFlags, cmd ...string) (string, error) { if flags.Workvol == "" { return "", errors.New("flags.Workvol has no value") } @@ -260,17 +209,25 @@ func (g RealHelper) runGsutil(ctx context.Context, flags DockerFlags, cmd ...str // Run gsutil from the workspace dir. "--workdir", flags.Workdir, // Connect to the network for metadata to get credentials. - "--network", "cloudbuild"} + "--network", "cloudbuild", + // Set bash entrypoint. + // For reasons currently unknown, a bash entrypoint and a -c parameter is required for wildcarding. + // Otherwise, any gsutil arguments with wildcards will not expand. Enclosing the source in single quotes does not help. + + "--entrypoint", "bash"} if flags.Tmpdir != "" { // Mount the temporary directory. args = append(args, []string{"--volume", fmt.Sprintf("%s:%s", flags.Tmpdir, flags.Tmpdir)}...) } // Add gsutil docker image and commands. args = append(args, "gcr.io/cloud-builders/gsutil") - args = append(args, cmd...) + args = append(args, "-c") + command := "gsutil " + strings.Join(cmd, " ") + args = append(args, command) - // We return the string output from the command run, but it's only intended to be used for debugging and testing. - return g.runAndScrape(ctx, args) + // If a tag is specified, we should stream the logs. Otherwise, run normally. + hasLogging := tag != "" + return g.runWithOptionalLogging(ctx, hasLogging, tag, args) } // getGeneration takes a GCS object URL as input and returns the URL with the generation number suffixed. @@ -281,7 +238,7 @@ func (g RealHelper) getGeneration(ctx context.Context, flags DockerFlags, url st // List existing object with generation number information. // See https://cloud.google.com/storage/docs/gsutil/commands/ls. 
- output, err := g.runGsutil(ctx, flags, "ls", "-a", url) + output, err := g.runGsutil(ctx, "", flags, "ls", "-a", url) if err != nil { return "", err } @@ -360,35 +317,17 @@ func (g RealHelper) parseGsutilManifest(manifestPath string) ([]*pb.ArtifactResu return artifacts, nil } -// runAndScrape executes the command and returns the output (stdin, stderr), without logging. -func (g RealHelper) runAndScrape(ctx context.Context, cmd []string) (string, error) { - + +func (g RealHelper) runWithOptionalLogging(ctx context.Context, hasLogging bool, tag string, cmd []string) (string, error) { var buf bytes.Buffer outWriter := io.Writer(&buf) errWriter := io.Writer(&buf) - err := g.runner.Run(ctx, cmd, nil, outWriter, errWriter, "") - return buf.String(), err -} -// adjustDest takes any leading directory filepath from src and suffixes it to dest. -// e.g. adjustDest("/nested/path/test.xml", "gs://some-bucket/dir/") = "gs://some-bucket/dir/nested/path/" -// -// Why? The 'gsutil cp' command copies a file from a local directory to a remote GCS bucket. https://cloud.google.com/storage/docs/gsutil/commands/cp -// The local file and GCS bucket are specified with file paths. -// Consider the following: -// source = "/nested/path/test.xml" # source path -// dest = "gs://some-bucket/dir/" # bucket path -// > gsutil cp source dest -// The resulting GCS object path will be "gs://some-bucket/dir/test.xml". This is problematic when we want to copy multiple source -// files with the same name that span different directories. The gsutil tool will ignore the source file's leading directory, and -// will copy all matching files to the same GCS object path. To avoid filename collision and overwriting, we maintain the directory structure -// by adjusting the bucket path to include the source file's leading directory. -func adjustDest(src, dest string) string { - - leadDir := path.Dir(src) - if len(leadDir) > 1 { - // Note: dest already has trailing slash, and if we do path.Join(), "gs://" will lose a slash. - return fmt.Sprintf("%s%s/", dest, strings.TrimPrefix(leadDir, "/")) + if hasLogging { + outWriter = io.MultiWriter(g.logger.MakeWriter(tag, -1, true), &buf) + errWriter = io.MultiWriter(g.logger.MakeWriter(tag, -1, false), &buf) } - return dest + + err := g.runner.Run(ctx, cmd, nil, outWriter, errWriter, "") + return buf.String(), err } diff --git a/gsutil/gsutil_test.go b/gsutil/gsutil_test.go index 6504eebf..317b4ed0 100644 --- a/gsutil/gsutil_test.go +++ b/gsutil/gsutil_test.go @@ -20,6 +20,7 @@ import ( "errors" "fmt" "io" + "io/ioutil" "regexp" "strings" "testing" @@ -33,6 +34,13 @@ import ( var joinedHeaders = strings.Join(csvHeaders, ",") +// noopLogger is a Logger that does nothing. +type noopLogger struct{} + +func (noopLogger) WriteMainEntry(msg string) {} +func (noopLogger) Close() error { return nil } +func (noopLogger) MakeWriter(string, int, bool) io.Writer { return ioutil.Discard } + type mockRunner struct { t *testing.T @@ -123,6 +131,13 @@ func (r *mockRunner) Clean() error { // gsutil simulates gsutil commands in the mockrunner. func (r *mockRunner) gsutil(args []string, in io.Reader, out, err io.Writer) error { + if startsWith(args, "-c") { + // Parse the gsutil commands from the -c field. + cmds := strings.Split(args[1], " ") + if cmds[0] == "gsutil" && len(cmds) > 1 { + return r.gsutil(cmds[1:], in, out, err) + } + } if startsWith(args, "ls") { // Simulate 'gsutil ls' command (https://cloud.google.com/storage/docs/gsutil/commands/ls). 
lastArg := args[len(args)-1] @@ -173,7 +188,7 @@ func (r *mockRunner) gsutil(args []string, in io.Reader, out, err io.Writer) err return nil } - if startsWith(args, "cp") { + if startsWith(args, "cp") || startsWith(args, "-m", "cp") { // Simulate 'gsutil cp' command (https://cloud.google.com/storage/docs/gsutil/commands/cp). if contains(args, "-L") { // -L option is present when gsutil copies source file to destination bucket. @@ -192,7 +207,9 @@ func (r *mockRunner) gsutil(args []string, in io.Reader, out, err io.Writer) err // Verify source file to copy exists in our mocked local environment. // We won't do wildcard/regex matching here, so tests should specify a file. src := args[len(args)-2] - exists, err := afero.Exists(r.fs, src) + // Remove the enclosing single quotes and prefixed "./" that is added to the source. + trimmedSrc := strings.TrimPrefix(strings.Replace(src, "'", "", 2), "./") + exists, err := afero.Exists(r.fs, trimmedSrc) if err != nil { return err } @@ -326,7 +343,7 @@ func TestVerifyBucket(t *testing.T) { t.Run(tc.name, func(t *testing.T) { r := newMockRunner(t, tc.name) - gsutilHelper := New(r, afero.NewMemMapFs()) + gsutilHelper := New(r, afero.NewMemMapFs(), noopLogger{}) err := gsutilHelper.VerifyBucket(ctx, tc.bucket) if err == nil && tc.wantErr { t.Errorf("got gsutilHelper.VerifyBucket(%s) = %v, want error", tc.bucket, err) @@ -401,7 +418,7 @@ func TestUploadArtifacts(t *testing.T) { r := newMockRunner(t, tc.name) r.manifestFile = toManifest(tc.manifestItems...) r.fs = fs - gsutilHelper := New(r, fs) + gsutilHelper := New(r, fs, noopLogger{}) for k, v := range tc.objectGenerations { // We add the objectGenerations to the runner objectGenerations map otherwise @@ -415,7 +432,7 @@ func TestUploadArtifacts(t *testing.T) { } } - // NB: The destination bucket's existence is checked before any artifacts are uploaded, so it's value here does not matter. + // NB: The destination bucket's existence is checked before any artifacts are uploaded, so its value here does not matter. results, err := gsutilHelper.UploadArtifacts(ctx, tc.flags, tc.source, "gs://some-bucket") if tc.wantError { if err == nil { @@ -435,102 +452,6 @@ func TestUploadArtifacts(t *testing.T) { t.Errorf("got results[%d] = %+v,\nwant %+v", i, item, tc.wantResults[i]) } } - - // For gcr.io/cloud-builders/gsutil cp commmands run in a docker container, a "./" must be prefixed to the source URL in order for gsutil wildcarding to work. 
-			wantSource := fmt.Sprintf("./%s", tc.source)
-			for _, c := range r.commands {
-				if strings.Contains(c, "cp") && strings.Contains(c, tc.source) && !strings.Contains(c, wantSource) {
-					t.Errorf("got source = %q, only want source %s; all source URLs must be prefixed with %q: args =[%+v]", tc.source, wantSource, "./", c)
-				}
-			}
-		})
-	}
-}
-
-func TestGlob(t *testing.T) {
-	ctx := context.Background()
-	newUUID = func() string { return "someuuid" }
-	defer func() { newUUID = uuid.New }()
-
-	testCases := []struct {
-		name         string
-		flags        DockerFlags
-		src          string // glob string
-		stdout       string // specifies standard output in the mockRunner
-		stderr       string // specifies error output in mockRunner
-		wantFiles    []string
-		wantCommands []string
-		wantError    bool
-	}{{
-		name:      "OneMatchingFile",
-		flags:     DockerFlags{Workvol: "workvol", Workdir: "workdir"},
-		src:       "foo.xml",
-		stdout:    "foo.xml\n",
-		wantFiles: []string{"foo.xml"},
-		wantCommands: []string{
-			"docker run --name cloudbuild_gsutil_" + newUUID() +
-				" --rm --volume workvol --workdir workdir --entrypoint bash ubuntu -c" +
-				" shopt -s globstar; find ./foo.xml -type f",
-		},
-	}, {
-		name:      "MultipleMatchingFiles",
-		flags:     DockerFlags{Workvol: "workvol", Workdir: "workdir"},
-		src:       "*.xml",
-		stdout:    "foo.xml\nbar.xml\n",
-		wantFiles: []string{"foo.xml", "bar.xml"},
-		wantCommands: []string{
-			"docker run --name cloudbuild_gsutil_" + newUUID() +
-				" --rm --volume workvol --workdir workdir --entrypoint bash ubuntu -c" +
-				" shopt -s globstar; find ./*.xml -type f",
-		},
-	}, {
-		name:   "NoMatchingFiles",
-		flags:  DockerFlags{Workvol: "workvol", Workdir: "workdir"},
-		src:    "idonotexist.xml",
-		stderr: "idonotexist.xml: No such file or directory\n",
-		wantCommands: []string{
-			"docker run --name cloudbuild_gsutil_" + newUUID() +
-				" --rm --volume workvol --workdir workdir --entrypoint bash ubuntu -c" +
-				" shopt -s globstar; find ./idonotexist.xml -type f",
-		},
-		wantError: true,
-	}, {
-		name:      "ErrorMissingWorkvol",
-		flags:     DockerFlags{Workdir: "workdir"},
-		wantError: true,
-	}, {
-		name:      "ErrorMissingWorkdir",
-		flags:     DockerFlags{Workvol: "workvol"},
-		wantError: true,
-	}}
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			r := newMockRunner(t, tc.name)
-			r.stdout = tc.stdout
-			r.stderr = tc.stderr
-			gsutilHelper := New(r, afero.NewMemMapFs())
-
-			files, err := gsutilHelper.glob(ctx, tc.flags, tc.src)
-			if tc.wantError {
-				if err == nil {
-					t.Errorf("glob(): got err = nil, want error")
-				}
-				return // desired behavior
-			}
-			if err != nil {
-				t.Errorf("glob(): err := %v", err)
-			}
-			if err := checkCommands(r.commands, tc.wantCommands); err != nil {
-				t.Errorf("checkCommands(): err = \n%v", err)
-			}
-			if len(files) != len(tc.wantFiles) {
-				t.Fatalf("got %d files = %+v, want %d files = %+v", len(files), files, len(tc.wantFiles), tc.wantFiles)
-			}
-			for i, f := range files {
-				if f != tc.wantFiles[i] {
-					t.Errorf("got file[%d] = %q, want %q", i, f, tc.wantFiles[i])
-				}
-			}
 		})
 	}
 }
@@ -552,8 +473,8 @@ func TestRunGsutil(t *testing.T) {
 		commands: []string{"ls"},
 		wantCommands: []string{
 			"docker run --name cloudbuild_gsutil_" + newUUID() +
-				" --rm --volume /var/run/docker.sock:/var/run/docker.sock --volume workvol --workdir workdir --network cloudbuild" +
-				" gcr.io/cloud-builders/gsutil ls",
+				" --rm --volume /var/run/docker.sock:/var/run/docker.sock --volume workvol --workdir workdir --network cloudbuild --entrypoint bash" +
+				" gcr.io/cloud-builders/gsutil -c gsutil ls",
 		},
 	}, {
 		name:     "HappyCaseMultiArgs",
@@ -561,8 +482,8 @@ func TestRunGsutil(t *testing.T) {
 		commands: []string{"ls", "gs://bucket-one"},
 		wantCommands: []string{
 			"docker run --name cloudbuild_gsutil_" + newUUID() +
-				" --rm --volume /var/run/docker.sock:/var/run/docker.sock --volume workvol --workdir workdir --network cloudbuild" +
-				" gcr.io/cloud-builders/gsutil ls gs://bucket-one",
+				" --rm --volume /var/run/docker.sock:/var/run/docker.sock --volume workvol --workdir workdir --network cloudbuild --entrypoint bash" +
+				" gcr.io/cloud-builders/gsutil -c gsutil ls gs://bucket-one",
 		},
 	}, {
 		name:     "HappyCaseTmpDir",
@@ -570,9 +491,9 @@ func TestRunGsutil(t *testing.T) {
 		commands: []string{"ls"},
 		wantCommands: []string{
 			"docker run --name cloudbuild_gsutil_" + newUUID() +
-				" --rm --volume /var/run/docker.sock:/var/run/docker.sock --volume workvol --workdir workdir --network cloudbuild" +
+				" --rm --volume /var/run/docker.sock:/var/run/docker.sock --volume workvol --workdir workdir --network cloudbuild --entrypoint bash" +
 				" --volume " + "tmpdir:tmpdir" +
-				" gcr.io/cloud-builders/gsutil ls",
+				" gcr.io/cloud-builders/gsutil -c gsutil ls",
 		},
 	}, {
 		name: "ErrorMissingWorkvol",
@@ -596,9 +517,9 @@ func TestRunGsutil(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			r := newMockRunner(t, tc.name)
-			gsutilHelper := New(r, afero.NewMemMapFs())
+			gsutilHelper := New(r, afero.NewMemMapFs(), noopLogger{})
 
-			output, err := gsutilHelper.runGsutil(ctx, tc.flags, tc.commands...)
+			output, err := gsutilHelper.runGsutil(ctx, "", tc.flags, tc.commands...)
 			if tc.wantError {
 				if err == nil {
 					t.Errorf("runGsutil(): got err = nil, want error")
@@ -625,7 +546,7 @@ func TestUploadArtifactsManifest(t *testing.T) {
 	wantManifestPath := "gs://bucket-one/manifest.log"
 
 	r := newMockRunner(t, "TestUploadArtifactsManifest")
-	gsutilHelper := New(r, afero.NewMemMapFs())
+	gsutilHelper := New(r, afero.NewMemMapFs(), noopLogger{})
 	manifestPath, err := gsutilHelper.UploadArtifactsManifest(ctx, flags, manifest, bucket, results)
 	if err != nil {
 		t.Fatalf("UploadArtifactsManifest(): err = %v", err)
@@ -666,13 +587,14 @@ func TestGetGeneration(t *testing.T) {
 	for _, tc := range testCases {
 		t.Run(tc.name, func(t *testing.T) {
 			r := newMockRunner(t, tc.name)
-			gsutilHelper := New(r, afero.NewMemMapFs())
+			gsutilHelper := New(r, afero.NewMemMapFs(), noopLogger{})
 
 			url, err := gsutilHelper.getGeneration(ctx, DockerFlags{Workvol: "workvol", Workdir: "workdir"}, tc.url)
 
 			wantCommands := []string{
 				"docker run --name cloudbuild_gsutil_" + newUUID() +
-					" --rm --volume /var/run/docker.sock:/var/run/docker.sock --volume workvol --workdir workdir --network cloudbuild gcr.io/cloud-builders/gsutil ls -a " + tc.url,
+					" --rm --volume /var/run/docker.sock:/var/run/docker.sock --volume workvol --workdir workdir --network cloudbuild --entrypoint bash" +
+					" gcr.io/cloud-builders/gsutil -c gsutil ls -a " + tc.url,
 			}
 			if err := checkCommands(r.commands, wantCommands); err != nil {
 				t.Errorf("checkCommands(): err = \n%v", err)
@@ -767,7 +689,7 @@ func TestParseGsutilManifest(t *testing.T) {
 				afero.WriteFile(fs, filepath, []byte(tc.manifest), filemode)
 			}
 			r := newMockRunner(t, tc.name)
-			gsutilHelper := New(r, fs)
+			gsutilHelper := New(r, fs, noopLogger{})
 
 			results, err := gsutilHelper.parseGsutilManifest(filepath)
 			if tc.wantError {
@@ -815,7 +737,7 @@ func TestCreateArtifactsManifest(t *testing.T) {
 		t.Run(tc.name, func(t *testing.T) {
 			r := newMockRunner(t, tc.name)
 			fs := afero.NewMemMapFs()
-			gsutilHelper := New(r, fs)
+			gsutilHelper := New(r, fs, noopLogger{})
 
 			if err := gsutilHelper.createArtifactsManifest(manifestPath, tc.results); err == nil && tc.wantErr {
 				t.Error("got gsutilHelper.writeArtifactsManifest() = nil, want error")
@@ -833,49 +755,3 @@ func TestCreateArtifactsManifest(t *testing.T) {
 		})
 	}
 }
-
-func TestAdjustDest(t *testing.T) {
-	testCases := []struct {
-		name     string
-		src      string
-		dest     string
-		wantDest string
-	}{{
-		name:     "NoLeadDir",
-		src:      "test.xml",
-		dest:     "gs://bucket/",
-		wantDest: "gs://bucket/",
-	}, {
-		name:     "LeadDir",
-		src:      "some/path/test.xml",
-		dest:     "gs://bucket/",
-		wantDest: "gs://bucket/some/path/",
-	}, {
-		name:     "LeadDirBucketDir",
-		src:      "/some/path/test.xml",
-		dest:     "gs://bucket/some/path/",
-		wantDest: "gs://bucket/some/path/some/path/",
-	}, {
-		name:     "SlashLeadDir",
-		src:      "/some/path/test.xml",
-		dest:     "gs://bucket/",
-		wantDest: "gs://bucket/some/path/",
-	}, {
-		name:     "DotSlashNoLeadDir",
-		src:      "./test.xml",
-		dest:     "gs://bucket/",
-		wantDest: "gs://bucket/",
-	}, {
-		name:     "DotSlashLeadDir",
-		src:      "./some/path/test.xml",
-		dest:     "gs://bucket/",
-		wantDest: "gs://bucket/some/path/",
-	}}
-	for _, tc := range testCases {
-		t.Run(tc.name, func(t *testing.T) {
-			if gotDest := adjustDest(tc.src, tc.dest); gotDest != tc.wantDest {
-				t.Errorf("got %q, want %q", gotDest, tc.wantDest)
-			}
-		})
-	}
-}
diff --git a/integration_tests/gce_startup_script.sh b/integration_tests/gce_startup_script.sh
index 1a3cb9bb..2353036a 100644
--- a/integration_tests/gce_startup_script.sh
+++ b/integration_tests/gce_startup_script.sh
@@ -76,8 +76,8 @@ gsutil -m copy ${gcs_path}/* /root/test-files/
 chmod +x /root/test-files/test_script.sh || exit
 
 # Copy local builder binary to bin.
-chmod +x /root/test-files/container-builder-local || exit
-mv /root/test-files/container-builder-local /usr/local/bin/
+chmod +x /root/test-files/cloud-build-local || exit
+mv /root/test-files/cloud-build-local /usr/local/bin/
 
 # Copy up an empty output.txt as a signal to the runner that the script is starting.
 touch /root/output.txt || exit
diff --git a/integration_tests/test_script.sh b/integration_tests/test_script.sh
index 52dc6973..2918ceb6 100755
--- a/integration_tests/test_script.sh
+++ b/integration_tests/test_script.sh
@@ -7,40 +7,40 @@ gcloud config set project $PROJECT_ID
 docker-credential-gcr configure-docker || exit
 
 # Flags tests.
-container-builder-local --version || exit
-container-builder-local --help || exit
-container-builder-local && exit # no source
-container-builder-local . --config=cloudbuild_nil.yaml && exit # flags after source
-container-builder-local --config=donotexist.yaml . && exit # non-existent config file
-container-builder-local --config=cloudbuild_nil.yaml . || exit # happy dryrun case
-container-builder-local --config=cloudbuild_nil.yaml --write-workspace=/tmp/workspace . || exit # happy dryrun case
+cloud-build-local --version || exit
+cloud-build-local --help || exit
+cloud-build-local && exit # no source
+cloud-build-local . --config=cloudbuild_nil.yaml && exit # flags after source
+cloud-build-local --config=donotexist.yaml . && exit # non-existent config file
+cloud-build-local --config=cloudbuild_nil.yaml . || exit # happy dryrun case
+cloud-build-local --config=cloudbuild_nil.yaml --write-workspace=/tmp/workspace . || exit # happy dryrun case
 if [ ! -f /tmp/workspace/cloudbuild_nil.yaml ]; then
   echo "Exported file not found!"
 fi
 
 # Valid substitutions
-container-builder-local --config=cloudbuild_substitutions.yaml --substitutions=_MESSAGE="bye world" . || exit
-container-builder-local --config=cloudbuild_substitutions.yaml --substitutions=COMMIT_SHA="my-sha" . || exit
-container-builder-local --config=cloudbuild_substitutions2.yaml --substitutions=_SUBSTITUTE_ME="literally-anything-else" . || exit
-container-builder-local --config=cloudbuild_substitutions2.yaml --substitutions=_MESSAGE="bye world",_SUBSTITUTE_ME="literally-anything-else" . || exit
-container-builder-local --config=cloudbuild_substitutions.yaml --substitutions=_MESSAGE="substitution set in command line only" . || exit
+cloud-build-local --config=cloudbuild_substitutions.yaml --substitutions=_MESSAGE="bye world" . || exit
+cloud-build-local --config=cloudbuild_substitutions.yaml --substitutions=COMMIT_SHA="my-sha" . || exit
+cloud-build-local --config=cloudbuild_substitutions2.yaml --substitutions=_SUBSTITUTE_ME="literally-anything-else" . || exit
+cloud-build-local --config=cloudbuild_substitutions2.yaml --substitutions=_MESSAGE="bye world",_SUBSTITUTE_ME="literally-anything-else" . || exit
+cloud-build-local --config=cloudbuild_substitutions.yaml --substitutions=_MESSAGE="substitution set in command line only" . || exit
 
 # Invalid substitutions are expected to exit with an error (hence the `&& exit`).
-container-builder-local --config=cloudbuild_substitutions.yaml --substitutions=PROJECT_ID="my-project" . && exit
-container-builder-local --config=cloudbuild_builtin_substitutions.yaml . && exit
+cloud-build-local --config=cloudbuild_substitutions.yaml --substitutions=PROJECT_ID="my-project" . && exit
+cloud-build-local --config=cloudbuild_builtin_substitutions.yaml . && exit
 
 # End to end tests.
-container-builder-local --config=cloudbuild_nil.yaml --dryrun=false . || exit
-container-builder-local --config=cloudbuild_nil.yaml --dryrun=false --no-source=true || exit
-container-builder-local --config=cloudbuild_nil.yaml --dryrun=false --no-source=true . && exit
-container-builder-local --config=cloudbuild_dockerfile.yaml --dryrun=false . || exit
-container-builder-local --config=cloudbuild_gcr.yaml --dryrun=false --push=true . || exit
-container-builder-local --config=cloudbuild_big.yaml --dryrun=false --push=true . || exit
-container-builder-local --config=cloudbuild_volumes.yaml --dryrun=false . || exit
-container-builder-local --config=cloudbuild_buildid.yaml --dryrun=false . || exit
+cloud-build-local --config=cloudbuild_nil.yaml --dryrun=false . || exit
+cloud-build-local --config=cloudbuild_nil.yaml --dryrun=false --no-source=true || exit
+cloud-build-local --config=cloudbuild_nil.yaml --dryrun=false --no-source=true . && exit
+cloud-build-local --config=cloudbuild_dockerfile.yaml --dryrun=false . || exit
+cloud-build-local --config=cloudbuild_gcr.yaml --dryrun=false --push=true . || exit
+cloud-build-local --config=cloudbuild_big.yaml --dryrun=false --push=true . || exit
+cloud-build-local --config=cloudbuild_volumes.yaml --dryrun=false . || exit
+cloud-build-local --config=cloudbuild_buildid.yaml --dryrun=false . || exit
 
 # Confirm that we set up credentials account correctly.
 WANT=$(gcloud config list --format="value(core.account)")
-OUT=$(container-builder-local --config=cloudbuild_auth.yaml --dryrun=false .)
+OUT=$(cloud-build-local --config=cloudbuild_auth.yaml --dryrun=false .)
 if [[ ${OUT} =~ .*${WANT}.* ]]
 then
   echo "PASS: auth setup"
diff --git a/localbuilder_main.go b/localbuilder_main.go
index f3b6dd4d..eddf3c13 100644
--- a/localbuilder_main.go
+++ b/localbuilder_main.go
@@ -13,7 +13,7 @@
 // limitations under the License.
 
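The localbuilder_main.go hunk that begins here keeps the runtime warning for users who still invoke the binary under its old container-builder-local name. A sketch of that check pulled into a small, independently testable helper, assuming the same strings.Contains test on os.Args[0] (the helper name and the standalone main are illustrative, not part of the patch):

package main

import (
	"log"
	"os"
	"strings"
)

// warnIfDeprecatedName logs a deprecation warning when the binary was invoked
// via its old container-builder-local name; argv0 is typically os.Args[0].
func warnIfDeprecatedName(argv0 string) {
	if strings.Contains(argv0, "container-builder") {
		log.Printf("WARNING: %v is deprecated. Please run `gcloud install cloud-build-local` to install its replacement.", argv0)
	}
}

func main() {
	warnIfDeprecatedName(os.Args[0]) // no-op unless run via the old binary name
}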
 // Package main runs the gcb local builder.
-package main // import "github.com/GoogleCloudPlatform/container-builder-local"
+package main // import "github.com/GoogleCloudPlatform/cloud-build-local"
 
 import (
 	"bufio"
@@ -33,13 +33,13 @@ import (
 	"golang.org/x/oauth2"
 	"github.com/pborman/uuid"
 
-	"github.com/GoogleCloudPlatform/container-builder-local/build"
-	"github.com/GoogleCloudPlatform/container-builder-local/common"
-	"github.com/GoogleCloudPlatform/container-builder-local/config"
-	"github.com/GoogleCloudPlatform/container-builder-local/gcloud"
-	"github.com/GoogleCloudPlatform/container-builder-local/metadata"
-	"github.com/GoogleCloudPlatform/container-builder-local/runner"
-	"github.com/GoogleCloudPlatform/container-builder-local/volume"
+	"github.com/GoogleCloudPlatform/cloud-build-local/build"
+	"github.com/GoogleCloudPlatform/cloud-build-local/common"
+	"github.com/GoogleCloudPlatform/cloud-build-local/config"
+	"github.com/GoogleCloudPlatform/cloud-build-local/gcloud"
+	"github.com/GoogleCloudPlatform/cloud-build-local/metadata"
+	"github.com/GoogleCloudPlatform/cloud-build-local/runner"
+	"github.com/GoogleCloudPlatform/cloud-build-local/volume"
 )
 
 const (
@@ -64,13 +64,9 @@ func exitUsage(msg string) {
 }
 
 func main() {
-	// BEGIN GOOGLE-INTERNAL
-	// This never needs to be mirrored out to GitHub. On GitHub, we're going to
-	// rename the repo and change the name of the built binary.
 	if strings.Contains(os.Args[0], "container-builder") {
 		log.Printf("WARNING: %v is deprecated. Please run `gcloud install cloud-build-local` to install its replacement.", os.Args[0])
 	}
-	// END GOOGLE-INTERNAL
 	flag.Parse()
 	ctx := context.Background()
 	args := flag.Args()
diff --git a/logger/logger.go b/logger/logger.go
new file mode 100644
index 00000000..6b9d18fb
--- /dev/null
+++ b/logger/logger.go
@@ -0,0 +1,27 @@
+// Copyright 2018 Google, Inc. All rights reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package logger defines a Logger interface to be used by local builder.
+package logger
+
+import (
+	"io"
+)
+
+// Logger encapsulates logging build output.
+type Logger interface {
+	WriteMainEntry(msg string)
+	Close() error
+	MakeWriter(prefix string, stepIdx int, stdout bool) io.Writer
+}
diff --git a/metadata/metadata.go b/metadata/metadata.go
index 84bb795d..dc1c9515 100644
--- a/metadata/metadata.go
+++ b/metadata/metadata.go
@@ -41,7 +41,7 @@ import (
 
 	"golang.org/x/oauth2"
 
-	"github.com/GoogleCloudPlatform/container-builder-local/runner"
+	"github.com/GoogleCloudPlatform/cloud-build-local/runner"
 )
 
 const (
@@ -53,8 +53,8 @@ const (
 	metadataLocalSubnet = "169.254.0.0/16"
 
 	// This subnet captures metadataHostedIP. This subnet is a reserved private subnet.
-	// This is the subnet used to create the cloudbuild docker network in the hosted
-	// container builder environment. All build steps are run connected to the
+	// This is the subnet used to create the cloudbuild docker network in the
+	// hosted Cloud Build environment. All build steps are run connected to the
 	// cloudbuild docker network.
 	metadataHostedSubnet = "192.168.10.0/24"
 
@@ -193,14 +193,14 @@ func (r RealUpdater) SetProjectInfo(b ProjectInfo) error {
 }
 
 // StartLocalServer starts the metadata server container for VMs running as
-// part of the container builder service.
+// part of the Cloud Build service.
 //
 // This version of Start*Server does not update iptables.
 //
 // The container listens on local port 8082, which is where RealUpdater POSTs
 // to.
 func StartLocalServer(ctx context.Context, r runner.Runner, metadataImage string) error {
-	// Unlike the hosted container builder service, the user's local machine is
+	// Unlike the hosted Cloud Build service, the user's local machine is
 	// not guaranteed to have the latest version, so we explicitly pull it.
 	if err := r.Run(ctx, []string{"docker", "pull", metadataImage}, nil, os.Stdout, os.Stderr, ""); err != nil {
 		return err
@@ -209,7 +209,7 @@ func StartLocalServer(ctx context.Context, r runner.Runner, metadataImage string
 }
 
 // StartCloudServer starts the metadata server container for VMs running as
-// part of the container builder service.
+// part of the Cloud Build service.
 //
 // This version of Start*Server needs to make iptables rules that we don't
 // want (or need) on a user's local machine.
diff --git a/validate/validate.go b/validate/validate.go
index 0a7f0b52..af80d991 100644
--- a/validate/validate.go
+++ b/validate/validate.go
@@ -23,10 +23,11 @@ import (
 	"regexp"
 	"strings"
 	"time"
+	"unicode"
 
 	pb "google.golang.org/genproto/googleapis/devtools/cloudbuild/v1"
 	"github.com/golang/protobuf/ptypes"
-	"github.com/GoogleCloudPlatform/container-builder-local/subst"
+	"github.com/GoogleCloudPlatform/cloud-build-local/subst"
 	"github.com/docker/distribution/reference"
 )
 
@@ -328,10 +329,19 @@ func CheckArtifacts(b *pb.Build) error {
 	pathExists := map[string]bool{}
 	duplicates := []string{}
 	for _, p := range b.Artifacts.Objects.Paths {
+		// Count duplicates.
 		if _, ok := pathExists[p]; ok {
 			duplicates = append(duplicates, p)
 		}
 		pathExists[p] = true
+
+		// Paths with whitespace are invalid.
+
+		for _, ch := range p {
+			if unicode.IsSpace(ch) {
+				return fmt.Errorf(".artifacts.paths %q contains whitespace", p)
+			}
+		}
 	}
 	if len(duplicates) > 0 {
 		return fmt.Errorf(".artifacts.paths field has duplicate paths; remove duplicates [%s]", strings.Join(duplicates, ", "))
diff --git a/validate/validate_test.go b/validate/validate_test.go
index c98ad1a8..2580b132 100644
--- a/validate/validate_test.go
+++ b/validate/validate_test.go
@@ -519,6 +519,15 @@ func TestCheckArtifacts(t *testing.T) {
 			},
 		},
 		wantErr: true,
+	}, {
+		// .artifacts.paths has whitespace
+		artifacts: &pb.Artifacts{
+			Objects: &pb.Artifacts_ArtifactObjects{
+				Location: "gs://some-bucket/",
+				Paths:    []string{"twins.xml", "-n twins.xml"},
+			},
+		},
+		wantErr: true,
 	}} {
 		b := &pb.Build{
 			Images: c.images,
diff --git a/volume/volume.go b/volume/volume.go
index a30b1fbb..b185f363 100644
--- a/volume/volume.go
+++ b/volume/volume.go
@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"sync"
 
-	"github.com/GoogleCloudPlatform/container-builder-local/runner"
+	"github.com/GoogleCloudPlatform/cloud-build-local/runner"
 )
 
 const (

From 26ff69714807e13c59725d9e2b2e6436bfe7750d Mon Sep 17 00:00:00 2001
From: Cloud Build Team
Date: Tue, 24 Jul 2018 10:03:57 -0400
Subject: [PATCH 3/5] Project import generated by Copybara.
PiperOrigin-RevId: 205822686
---
 cloudbuild.yaml | 2 +-
 vendor.sh       | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/cloudbuild.yaml b/cloudbuild.yaml
index f1542ee2..2648b148 100644
--- a/cloudbuild.yaml
+++ b/cloudbuild.yaml
@@ -12,7 +12,7 @@ steps:
 
 # Binary creation.
 - name: 'gcr.io/cloud-builders/go:debian'
-  args: ['build', '-o', 'container-builder-local', 'github.com/GoogleCloudPlatform/container-builder-local']
+  args: ['build', '-o', 'cloud-build-local', 'github.com/GoogleCloudPlatform/cloud-build-local']
 
 # Copy the integration test files to GCS and
 # run integration tests in a VM.
diff --git a/vendor.sh b/vendor.sh
index c2e25db9..7c87e8f0 100755
--- a/vendor.sh
+++ b/vendor.sh
@@ -1,5 +1,5 @@
 mkdir vendor || exit
-imports=$(go list -f '{{range .Imports}}{{printf "%s\n" .}}{{end}}' ./... | grep -v "github.com/GoogleCloudPlatform/container-builder-local" )
+imports=$(go list -f '{{range .Imports}}{{printf "%s\n" .}}{{end}}' ./... | grep -v "github.com/GoogleCloudPlatform/cloud-build-local" )
 export GOPATH=$PWD/vendor
 set -x
 for import in $imports; do

From 711a2d119177e13748997f9c463d54f0eab46e72 Mon Sep 17 00:00:00 2001
From: Cloud Build Team
Date: Tue, 24 Jul 2018 10:43:38 -0400
Subject: [PATCH 4/5] Project import generated by Copybara.

PiperOrigin-RevId: 205827022
---
 integration_tests/run_tests_on_vm.sh | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/integration_tests/run_tests_on_vm.sh b/integration_tests/run_tests_on_vm.sh
index bc49f234..fbe266aa 100755
--- a/integration_tests/run_tests_on_vm.sh
+++ b/integration_tests/run_tests_on_vm.sh
@@ -2,7 +2,7 @@
 DATE=`date +%Y%m%d-%H%M%S`
 GCS_PATH=gs://local-builder-test/$DATE
 GCS_LOGS_PATH=gs://local-builder-test-logs/$DATE
-gsutil -m copy container-builder-local $GCS_PATH/
+gsutil -m copy cloud-build-local $GCS_PATH/
 gsutil -m copy ./integration_tests/* $GCS_PATH/
 
 # Create a VM with startup script.

From 024da0f04e87a990268d44258494ac6f3dd12176 Mon Sep 17 00:00:00 2001
From: Cloud Build Team
Date: Tue, 24 Jul 2018 11:22:23 -0400
Subject: [PATCH 5/5] Project import generated by Copybara.

PiperOrigin-RevId: 205831522
---
 common/common.go | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/common/common.go b/common/common.go
index 7f7c9c79..6ccf58cd 100644
--- a/common/common.go
+++ b/common/common.go
@@ -175,9 +175,6 @@ func SubstituteAndValidate(b *pb.Build, substMap map[string]string) error {
 
 // TokenTransport is a RoundTripper that automatically applies OAuth
 // credentials from the token source.
-//
-// This can be replaced by google.DefaultClient when metadata spoofing works by
-// IP address (b/33233310).
 type TokenTransport struct {
 	Ts oauth2.TokenSource
 }
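For readers following the common.TokenTransport change above: its doc comment says it applies OAuth credentials from the token source on every request, but the RoundTrip body lives elsewhere in common/common.go and is not part of this patch. A minimal sketch of how such a token-injecting transport is typically implemented and wired into an http.Client, offered as an assumption-laden illustration rather than the project's code (type and package names are hypothetical):

package transportutil

import (
	"net/http"

	"golang.org/x/oauth2"
)

// bearerTransport mirrors the documented behavior of common.TokenTransport:
// fetch a token from the oauth2.TokenSource and attach it to each request.
type bearerTransport struct {
	ts oauth2.TokenSource
}

func (t *bearerTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	tok, err := t.ts.Token()
	if err != nil {
		return nil, err
	}
	// Clone before mutating so the caller's request is left untouched.
	r2 := req.Clone(req.Context())
	r2.Header.Set("Authorization", "Bearer "+tok.AccessToken)
	return http.DefaultTransport.RoundTrip(r2)
}

// Usage: client := &http.Client{Transport: &bearerTransport{ts: someTokenSource}}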