diff --git a/docs/users/api-spec-resources.md b/docs/users/api-spec-resources.md index 1d8780a9f..64a10f5ff 100644 --- a/docs/users/api-spec-resources.md +++ b/docs/users/api-spec-resources.md @@ -545,6 +545,17 @@ Returns 201 (Created) on success. Result is a JSON document like: The `commitment` object has the same structure as the `commitments[]` objects in `GET /v1/domains/:domain_id/projects/:project_id/commitments`. If `confirm_by` was given, a successful response will include the `confirmed_at` timestamp. +### POST /v1/domains/:domain\_id/projects/:project\_id/commitments/merge + +Merges active commitments on the same resource within the given project. The newly created merged commitment receives the latest expiration date of all given commitments. Requires a project-admin token, and a request body that is a JSON document like: + +```json +{ + "commitment_ids": [1,2,5] +} +``` +Returns 202 (Accepted) on success. The response body is a JSON document containing the merged commitment. + ### POST /v1/domains/:domain\_id/projects/:project\_id/commitments/can-confirm Checks if a new commitment within the given project could be confirmed immediately. diff --git a/internal/api/audit.go b/internal/api/audit.go index 22088e20b..bcb2e67d3 100644 --- a/internal/api/audit.go +++ b/internal/api/audit.go @@ -23,6 +23,8 @@ import ( "fmt" "strconv" + "github.com/sapcc/limes/internal/db" + "github.com/sapcc/go-api-declarations/cadf" "github.com/sapcc/go-api-declarations/limes" limesrates "github.com/sapcc/go-api-declarations/limes/rates" @@ -101,12 +103,12 @@ func (t rateLimitEventTarget) Render() cadf.Resource { // commitmentEventTarget contains the structure for rendering a cadf.Event.Target for // changes regarding commitments. 
type commitmentEventTarget struct { - DomainID string - DomainName string - ProjectID string - ProjectName string - SupersededCommitment *limesresources.Commitment - Commitments []limesresources.Commitment // must have at least one entry + DomainID string + DomainName string + ProjectID string + ProjectName string + Commitments []limesresources.Commitment // must have at least one entry + WorkflowContext *db.CommitmentWorkflowContext } // Render implements the audittools.Target interface. @@ -131,8 +133,8 @@ func (t commitmentEventTarget) Render() cadf.Resource { attachment := must.Return(cadf.NewJSONAttachment(name, commitment)) res.Attachments = append(res.Attachments, attachment) } - if t.SupersededCommitment != nil { - attachment := must.Return(cadf.NewJSONAttachment("superseded-payload", *t.SupersededCommitment)) + if t.WorkflowContext != nil { + attachment := must.Return(cadf.NewJSONAttachment("context-payload", *t.WorkflowContext)) res.Attachments = append(res.Attachments, attachment) } return res diff --git a/internal/api/commitment.go b/internal/api/commitment.go index 4c7b8775f..502e1607f 100644 --- a/internal/api/commitment.go +++ b/internal/api/commitment.go @@ -381,16 +381,22 @@ func (p *v1Provider) CreateProjectCommitment(w http.ResponseWriter, r *http.Requ // prepare commitment confirmBy := maybeUnpackUnixEncodedTime(req.ConfirmBy) + creationContext := db.CommitmentWorkflowContext{Reason: db.CommitmentReasonCreate} + buf, err := json.Marshal(creationContext) + if respondwith.ErrorText(w, err) { + return + } dbCommitment := db.ProjectCommitment{ - AZResourceID: azResourceID, - Amount: req.Amount, - Duration: req.Duration, - CreatedAt: now, - CreatorUUID: token.UserUUID(), - CreatorName: fmt.Sprintf("%s@%s", token.UserName(), token.UserDomainName()), - ConfirmBy: confirmBy, - ConfirmedAt: nil, // may be set below - ExpiresAt: req.Duration.AddTo(unwrapOrDefault(confirmBy, now)), + AZResourceID: azResourceID, + Amount: req.Amount, + Duration: 
req.Duration, + CreatedAt: now, + CreatorUUID: token.UserUUID(), + CreatorName: fmt.Sprintf("%s@%s", token.UserName(), token.UserDomainName()), + ConfirmBy: confirmBy, + ConfirmedAt: nil, // may be set below + ExpiresAt: req.Duration.AddTo(unwrapOrDefault(confirmBy, now)), + CreationContextJSON: json.RawMessage(buf), } if req.ConfirmBy == nil { // if not planned for confirmation in the future, confirm immediately (or fail) @@ -424,11 +430,12 @@ func (p *v1Provider) CreateProjectCommitment(w http.ResponseWriter, r *http.Requ ReasonCode: http.StatusCreated, Action: cadf.CreateAction, Target: commitmentEventTarget{ - DomainID: dbDomain.UUID, - DomainName: dbDomain.Name, - ProjectID: dbProject.UUID, - ProjectName: dbProject.Name, - Commitments: []limesresources.Commitment{p.convertCommitmentToDisplayForm(dbCommitment, *loc, token)}, + DomainID: dbDomain.UUID, + DomainName: dbDomain.Name, + ProjectID: dbProject.UUID, + ProjectName: dbProject.Name, + Commitments: []limesresources.Commitment{p.convertCommitmentToDisplayForm(dbCommitment, *loc, token)}, + WorkflowContext: &creationContext, }, }) @@ -451,6 +458,160 @@ func (p *v1Provider) CreateProjectCommitment(w http.ResponseWriter, r *http.Requ respondwith.JSON(w, http.StatusCreated, map[string]any{"commitment": c}) } +// MergeProjectCommitments handles POST /v1/domains/:domain_id/projects/:project_id/commitments/merge. 
+func (p *v1Provider) MergeProjectCommitments(w http.ResponseWriter, r *http.Request) { + httpapi.IdentifyEndpoint(r, "/v1/domains/:id/projects/:id/commitments/merge") + token := p.CheckToken(r) + if !token.Require(w, "project:edit") { + return + } + dbDomain := p.FindDomainFromRequest(w, r) + if dbDomain == nil { + return + } + dbProject := p.FindProjectFromRequest(w, r, dbDomain) + if dbProject == nil { + return + } + var parseTarget struct { + CommitmentIDs []db.ProjectCommitmentID `json:"commitment_ids"` + } + if !RequireJSON(w, r, &parseTarget) { + return + } + commitmentIDs := parseTarget.CommitmentIDs + if len(commitmentIDs) < 2 { + http.Error(w, fmt.Sprintf("merging requires at least two commitments, but %d were given", len(commitmentIDs)), http.StatusBadRequest) + return + } + + // Load commitments + dbCommitments := make([]db.ProjectCommitment, len(commitmentIDs)) + for i, commitmentID := range commitmentIDs { + err := p.DB.SelectOne(&dbCommitments[i], findProjectCommitmentByIDQuery, commitmentID, dbProject.ID) + if errors.Is(err, sql.ErrNoRows) { + http.Error(w, "no such commitment", http.StatusNotFound) + return + } else if respondwith.ErrorText(w, err) { + return + } + } + + // Verify that all commitments agree on resource and AZ and are active + azResourceID := dbCommitments[0].AZResourceID + for _, dbCommitment := range dbCommitments { + if dbCommitment.AZResourceID != azResourceID { + http.Error(w, "all commitments must be on the same resource and AZ", http.StatusConflict) + return + } + if dbCommitment.State != db.CommitmentStateActive { + http.Error(w, "only active commitments may be merged", http.StatusConflict) + return + } + } + + var loc core.AZResourceLocation + err := p.DB.QueryRow(findProjectAZResourceLocationByIDQuery, azResourceID). 
+ Scan(&loc.ServiceType, &loc.ResourceName, &loc.AvailabilityZone) + if errors.Is(err, sql.ErrNoRows) { + http.Error(w, "no route to this commitment", http.StatusNotFound) + return + } else if respondwith.ErrorText(w, err) { + return + } + + // Start transaction for creating new commitment and marking merged commitments as superseded + tx, err := p.DB.Begin() + if respondwith.ErrorText(w, err) { + return + } + defer sqlext.RollbackUnlessCommitted(tx) + + // Create merged template + now := p.timeNow() + dbMergedCommitment := db.ProjectCommitment{ + AZResourceID: azResourceID, + Amount: 0, // overwritten below + Duration: limesresources.CommitmentDuration{}, // overwritten below + CreatedAt: now, + CreatorUUID: token.UserUUID(), + CreatorName: fmt.Sprintf("%s@%s", token.UserName(), token.UserDomainName()), + ConfirmedAt: &now, + ExpiresAt: time.Time{}, // overwritten below + State: db.CommitmentStateActive, + } + + // Fill amount and latest expiration date + for _, dbCommitment := range dbCommitments { + dbMergedCommitment.Amount += dbCommitment.Amount + if dbCommitment.ExpiresAt.After(dbMergedCommitment.ExpiresAt) { + dbMergedCommitment.ExpiresAt = dbCommitment.ExpiresAt + dbMergedCommitment.Duration = dbCommitment.Duration + } + } + + // Fill workflow context + creationContext := db.CommitmentWorkflowContext{ + Reason: db.CommitmentReasonMerge, + RelatedCommitmentIDs: commitmentIDs, + } + buf, err := json.Marshal(creationContext) + if respondwith.ErrorText(w, err) { + return + } + dbMergedCommitment.CreationContextJSON = json.RawMessage(buf) + + // Insert into database + err = tx.Insert(&dbMergedCommitment) + if respondwith.ErrorText(w, err) { + return + } + + // Mark merged commits as superseded + supersedeContext := db.CommitmentWorkflowContext{ + Reason: db.CommitmentReasonMerge, + RelatedCommitmentIDs: []db.ProjectCommitmentID{dbMergedCommitment.ID}, + } + buf, err = json.Marshal(supersedeContext) + if respondwith.ErrorText(w, err) { + return + } + for _, 
dbCommitment := range dbCommitments { + dbCommitment.SupersededAt = &now + dbCommitment.SupersedeContextJSON = liquids.PointerTo(json.RawMessage(buf)) + dbCommitment.State = db.CommitmentStateSuperseded + _, err = tx.Update(&dbCommitment) + if respondwith.ErrorText(w, err) { + return + } + } + + err = tx.Commit() + if respondwith.ErrorText(w, err) { + return + } + + c := p.convertCommitmentToDisplayForm(dbMergedCommitment, loc, token) + auditEvent := commitmentEventTarget{ + DomainID: dbDomain.UUID, + DomainName: dbDomain.Name, + ProjectID: dbProject.UUID, + ProjectName: dbProject.Name, + Commitments: []limesresources.Commitment{c}, + WorkflowContext: &creationContext, + } + p.auditor.Record(audittools.Event{ + Time: p.timeNow(), + Request: r, + User: token, + ReasonCode: http.StatusAccepted, + Action: cadf.UpdateAction, + Target: auditEvent, + }) + + respondwith.JSON(w, http.StatusAccepted, map[string]any{"commitment": c}) +} + // DeleteProjectCommitment handles DELETE /v1/domains/:domain_id/projects/:project_id/commitments/:id. 
func (p *v1Provider) DeleteProjectCommitment(w http.ResponseWriter, r *http.Request) { httpapi.IdentifyEndpoint(r, "/v1/domains/:id/projects/:id/commitments/:id") @@ -519,7 +680,9 @@ func (p *v1Provider) DeleteProjectCommitment(w http.ResponseWriter, r *http.Requ func (p *v1Provider) canDeleteCommitment(token *gopherpolicy.Token, commitment db.ProjectCommitment) bool { // up to 24 hours after creation of fresh commitments, future commitments can still be deleted by their creators if commitment.State == db.CommitmentStatePlanned || commitment.State == db.CommitmentStatePending || commitment.State == db.CommitmentStateActive { - if commitment.PredecessorID == nil && p.timeNow().Before(commitment.CreatedAt.Add(24*time.Hour)) { + var creationContext db.CommitmentWorkflowContext + err := json.Unmarshal(commitment.CreationContextJSON, &creationContext) + if err == nil && creationContext.Reason == db.CommitmentReasonCreate && p.timeNow().Before(commitment.CreatedAt.Add(24*time.Hour)) { if token.Check("project:edit") { return true } @@ -605,10 +768,16 @@ func (p *v1Provider) StartCommitmentTransfer(w http.ResponseWriter, r *http.Requ now := p.timeNow() transferAmount := req.Amount remainingAmount := dbCommitment.Amount - req.Amount - transferCommitment := p.buildSplitCommitment(dbCommitment, transferAmount) + transferCommitment, err := p.buildSplitCommitment(dbCommitment, transferAmount) + if respondwith.ErrorText(w, err) { + return + } transferCommitment.TransferStatus = req.TransferStatus transferCommitment.TransferToken = &transferToken - remainingCommitment := p.buildSplitCommitment(dbCommitment, remainingAmount) + remainingCommitment, err := p.buildSplitCommitment(dbCommitment, remainingAmount) + if respondwith.ErrorText(w, err) { + return + } err = tx.Insert(&transferCommitment) if respondwith.ErrorText(w, err) { return @@ -617,8 +786,17 @@ func (p *v1Provider) StartCommitmentTransfer(w http.ResponseWriter, r *http.Requ if respondwith.ErrorText(w, err) { return } + 
supersedeContext := db.CommitmentWorkflowContext{ + Reason: db.CommitmentReasonSplit, + RelatedCommitmentIDs: []db.ProjectCommitmentID{transferCommitment.ID, remainingCommitment.ID}, + } + buf, err := json.Marshal(supersedeContext) + if respondwith.ErrorText(w, err) { + return + } dbCommitment.State = db.CommitmentStateSuperseded dbCommitment.SupersededAt = &now + dbCommitment.SupersedeContextJSON = liquids.PointerTo(json.RawMessage(buf)) _, err = tx.Update(&dbCommitment) if respondwith.ErrorText(w, err) { return @@ -654,33 +832,59 @@ func (p *v1Provider) StartCommitmentTransfer(w http.ResponseWriter, r *http.Requ ProjectID: dbProject.UUID, ProjectName: dbProject.Name, Commitments: []limesresources.Commitment{c}, - // TODO: if commitment was split, log all participating commitment objects (incl. the SupersededCommitment) }, }) respondwith.JSON(w, http.StatusAccepted, map[string]any{"commitment": c}) } -func (p *v1Provider) buildSplitCommitment(dbCommitment db.ProjectCommitment, amount uint64) db.ProjectCommitment { +func (p *v1Provider) buildSplitCommitment(dbCommitment db.ProjectCommitment, amount uint64) (db.ProjectCommitment, error) { now := p.timeNow() - return db.ProjectCommitment{ - AZResourceID: dbCommitment.AZResourceID, - Amount: amount, - Duration: dbCommitment.Duration, - CreatedAt: now, - CreatorUUID: dbCommitment.CreatorUUID, - CreatorName: dbCommitment.CreatorName, - ConfirmBy: dbCommitment.ConfirmBy, - ConfirmedAt: dbCommitment.ConfirmedAt, - ExpiresAt: dbCommitment.ExpiresAt, - PredecessorID: &dbCommitment.ID, - State: dbCommitment.State, + creationContext := db.CommitmentWorkflowContext{ + Reason: db.CommitmentReasonSplit, + RelatedCommitmentIDs: []db.ProjectCommitmentID{dbCommitment.ID}, + } + buf, err := json.Marshal(creationContext) + if err != nil { + return db.ProjectCommitment{}, err } + return db.ProjectCommitment{ + AZResourceID: dbCommitment.AZResourceID, + Amount: amount, + Duration: dbCommitment.Duration, + CreatedAt: now, + CreatorUUID: 
dbCommitment.CreatorUUID, + CreatorName: dbCommitment.CreatorName, + ConfirmBy: dbCommitment.ConfirmBy, + ConfirmedAt: dbCommitment.ConfirmedAt, + ExpiresAt: dbCommitment.ExpiresAt, + CreationContextJSON: json.RawMessage(buf), + State: dbCommitment.State, + }, nil } -func (p *v1Provider) buildConvertedCommitment(dbCommitment db.ProjectCommitment, azResourceID db.ProjectAZResourceID, amount uint64) db.ProjectCommitment { - commitment := p.buildSplitCommitment(dbCommitment, amount) - commitment.AZResourceID = azResourceID - return commitment +func (p *v1Provider) buildConvertedCommitment(dbCommitment db.ProjectCommitment, azResourceID db.ProjectAZResourceID, amount uint64) (db.ProjectCommitment, error) { + now := p.timeNow() + creationContext := db.CommitmentWorkflowContext{ + Reason: db.CommitmentReasonConvert, + RelatedCommitmentIDs: []db.ProjectCommitmentID{dbCommitment.ID}, + } + buf, err := json.Marshal(creationContext) + if err != nil { + return db.ProjectCommitment{}, err + } + return db.ProjectCommitment{ + AZResourceID: azResourceID, + Amount: amount, + Duration: dbCommitment.Duration, + CreatedAt: now, + CreatorUUID: dbCommitment.CreatorUUID, + CreatorName: dbCommitment.CreatorName, + ConfirmBy: dbCommitment.ConfirmBy, + ConfirmedAt: dbCommitment.ConfirmedAt, + ExpiresAt: dbCommitment.ExpiresAt, + CreationContextJSON: json.RawMessage(buf), + State: dbCommitment.State, + }, nil } // GetCommitmentByTransferToken handles GET /v1/commitments/{token} @@ -1025,16 +1229,20 @@ func (p *v1Provider) ConvertCommitment(w http.ResponseWriter, r *http.Request) { } auditEvent := commitmentEventTarget{ - DomainID: dbDomain.UUID, - DomainName: dbDomain.Name, - ProjectID: dbProject.UUID, - ProjectName: dbProject.Name, - SupersededCommitment: liquids.PointerTo(p.convertCommitmentToDisplayForm(dbCommitment, sourceLoc, token)), + DomainID: dbDomain.UUID, + DomainName: dbDomain.Name, + ProjectID: dbProject.UUID, + ProjectName: dbProject.Name, } + relatedCommitmentIDs := 
make([]db.ProjectCommitmentID, 0) remainingAmount := dbCommitment.Amount - req.SourceAmount if remainingAmount > 0 { - remainingCommitment := p.buildSplitCommitment(dbCommitment, remainingAmount) + remainingCommitment, err := p.buildSplitCommitment(dbCommitment, remainingAmount) + if respondwith.ErrorText(w, err) { + return + } + relatedCommitmentIDs = append(relatedCommitmentIDs, remainingCommitment.ID) err = tx.Insert(&remainingCommitment) if respondwith.ErrorText(w, err) { return @@ -1044,7 +1252,11 @@ func (p *v1Provider) ConvertCommitment(w http.ResponseWriter, r *http.Request) { ) } - convertedCommitment := p.buildConvertedCommitment(dbCommitment, targetAZResourceID, conversionAmount) + convertedCommitment, err := p.buildConvertedCommitment(dbCommitment, targetAZResourceID, conversionAmount) + if respondwith.ErrorText(w, err) { + return + } + relatedCommitmentIDs = append(relatedCommitmentIDs, convertedCommitment.ID) err = tx.Insert(&convertedCommitment) if respondwith.ErrorText(w, err) { return @@ -1052,8 +1264,17 @@ func (p *v1Provider) ConvertCommitment(w http.ResponseWriter, r *http.Request) { // supersede the original commitment now := p.timeNow() + supersedeContext := db.CommitmentWorkflowContext{ + Reason: db.CommitmentReasonConvert, + RelatedCommitmentIDs: relatedCommitmentIDs, + } + buf, err := json.Marshal(supersedeContext) + if respondwith.ErrorText(w, err) { + return + } dbCommitment.State = db.CommitmentStateSuperseded dbCommitment.SupersededAt = &now + dbCommitment.SupersedeContextJSON = liquids.PointerTo(json.RawMessage(buf)) _, err = tx.Update(&dbCommitment) if respondwith.ErrorText(w, err) { return @@ -1066,6 +1287,10 @@ func (p *v1Provider) ConvertCommitment(w http.ResponseWriter, r *http.Request) { c := p.convertCommitmentToDisplayForm(convertedCommitment, targetLoc, token) auditEvent.Commitments = append([]limesresources.Commitment{c}, auditEvent.Commitments...) 
+ auditEvent.WorkflowContext = &db.CommitmentWorkflowContext{ + Reason: db.CommitmentReasonSplit, + RelatedCommitmentIDs: []db.ProjectCommitmentID{dbCommitment.ID}, + } p.auditor.Record(audittools.Event{ Time: p.timeNow(), Request: r, diff --git a/internal/api/commitment_test.go b/internal/api/commitment_test.go index 0ffa194db..e4828c023 100644 --- a/internal/api/commitment_test.go +++ b/internal/api/commitment_test.go @@ -1468,3 +1468,232 @@ func Test_UpdateCommitmentDuration(t *testing.T) { ExpectStatus: http.StatusForbidden, }.Check(t, s.Handler) } + +func Test_MergeCommitments(t *testing.T) { + s := test.NewSetup(t, + test.WithDBFixtureFile("fixtures/start-data-commitments.sql"), + test.WithConfig(testCommitmentsYAMLWithoutMinConfirmDate), + test.WithAPIHandler(NewV1API), + ) + + // Create two confirmed commitments on the same resource + req1 := assert.JSONObject{ + "id": 1, + "service_type": "second", + "resource_name": "capacity", + "availability_zone": "az-one", + "amount": 10, + "duration": "1 hour", + } + req2 := assert.JSONObject{ + "id": 2, + "service_type": "second", + "resource_name": "capacity", + "availability_zone": "az-one", + "amount": 5, + "duration": "2 hours", + } + // Create confirmed commitment in different AZ + req3 := assert.JSONObject{ + "id": 3, + "service_type": "second", + "resource_name": "capacity", + "availability_zone": "az-two", + "amount": 1, + "duration": "2 hours", + } + // Create confirmed commitment on different resource + req4 := assert.JSONObject{ + "id": 4, + "service_type": "second", + "resource_name": "capacity_portion", + "availability_zone": "az-one", + "amount": 1, + "duration": "2 hours", + } + resp3 := assert.JSONObject{ + "id": 3, + "service_type": "second", + "resource_name": "capacity", + "availability_zone": "az-two", + "amount": 1, + "unit": "B", + "duration": "2 hours", + "created_at": s.Clock.Now().Unix(), + "creator_uuid": "uuid-for-alice", + "creator_name": "alice@Default", + "can_be_deleted": true, + 
"confirmed_at": 0, + "expires_at": s.Clock.Now().Add(2 * time.Hour).Unix(), + } + resp4 := assert.JSONObject{ + "id": 4, + "service_type": "second", + "resource_name": "capacity_portion", + "availability_zone": "az-one", + "amount": 1, + "unit": "B", + "duration": "2 hours", + "created_at": s.Clock.Now().Unix(), + "creator_uuid": "uuid-for-alice", + "creator_name": "alice@Default", + "can_be_deleted": true, + "confirmed_at": 0, + "expires_at": s.Clock.Now().Add(2 * time.Hour).Unix(), + } + // Merged commitment + resp5 := assert.JSONObject{ + "id": 5, + "service_type": "second", + "resource_name": "capacity", + "availability_zone": "az-one", + "amount": 15, + "unit": "B", + "duration": "2 hours", + "created_at": s.Clock.Now().Unix(), + "creator_uuid": "uuid-for-alice", + "creator_name": "alice@Default", + "can_be_deleted": true, + "confirmed_at": 0, + "expires_at": s.Clock.Now().Add(2 * time.Hour).Unix(), + } + assert.HTTPRequest{ + Method: http.MethodPost, + Path: "/v1/domains/uuid-for-germany/projects/uuid-for-berlin/commitments/new", + Body: assert.JSONObject{"commitment": req1}, + ExpectStatus: http.StatusCreated, + }.Check(t, s.Handler) + assert.HTTPRequest{ + Method: http.MethodPost, + Path: "/v1/domains/uuid-for-germany/projects/uuid-for-berlin/commitments/new", + Body: assert.JSONObject{"commitment": req2}, + ExpectStatus: http.StatusCreated, + }.Check(t, s.Handler) + assert.HTTPRequest{ + Method: http.MethodPost, + Path: "/v1/domains/uuid-for-germany/projects/uuid-for-berlin/commitments/new", + Body: assert.JSONObject{"commitment": req3}, + ExpectStatus: http.StatusCreated, + }.Check(t, s.Handler) + assert.HTTPRequest{ + Method: http.MethodPost, + Path: "/v1/domains/uuid-for-germany/projects/uuid-for-berlin/commitments/new", + Body: assert.JSONObject{"commitment": req4}, + ExpectStatus: http.StatusCreated, + }.Check(t, s.Handler) + + // No authentication + // Missing edit permissions + s.TokenValidator.Enforcer.AllowEdit = false + assert.HTTPRequest{ + 
Method: http.MethodPost, + Path: "/v1/domains/uuid-for-germany/projects/uuid-for-berlin/commitments/merge", + Body: assert.JSONObject{"commitment_ids": []int{1, 2}}, + ExpectStatus: http.StatusForbidden, + }.Check(t, s.Handler) + s.TokenValidator.Enforcer.AllowEdit = true + + // Unknown domain, project and commitment + assert.HTTPRequest{ + Method: http.MethodPost, + Path: "/v1/domains/unknown/projects/uuid-for-berlin/commitments/merge", + Body: assert.JSONObject{"commitment_ids": []int{1, 2}}, + ExpectStatus: http.StatusNotFound, + }.Check(t, s.Handler) + assert.HTTPRequest{ + Method: http.MethodPost, + Path: "/v1/domains/uuid-for-germany/projects/unknown/commitments/merge", + Body: assert.JSONObject{"commitment_ids": []int{1, 2}}, + ExpectStatus: http.StatusNotFound, + }.Check(t, s.Handler) + assert.HTTPRequest{ + Method: http.MethodPost, + Path: "/v1/domains/uuid-for-germany/projects/uuid-for-berlin/commitments/merge", + Body: assert.JSONObject{"commitment_ids": []int{1, 2000}}, + ExpectStatus: http.StatusNotFound, + }.Check(t, s.Handler) + + // Check that there are at least 2 commits to merge + assert.HTTPRequest{ + Method: http.MethodPost, + Path: "/v1/domains/uuid-for-germany/projects/uuid-for-berlin/commitments/merge", + Body: assert.JSONObject{"commitment_ids": []int{1}}, + ExpectStatus: http.StatusBadRequest, + }.Check(t, s.Handler) + + // Do not merge commitments in different AZs + assert.HTTPRequest{ + Method: http.MethodPost, + Path: "/v1/domains/uuid-for-germany/projects/uuid-for-berlin/commitments/merge", + Body: assert.JSONObject{"commitment_ids": []int{1, 3}}, + ExpectStatus: http.StatusConflict, + }.Check(t, s.Handler) + // Do not merge commitments on different resources + assert.HTTPRequest{ + Method: http.MethodPost, + Path: "/v1/domains/uuid-for-germany/projects/uuid-for-berlin/commitments/merge", + Body: assert.JSONObject{"commitment_ids": []int{1, 4}}, + ExpectStatus: http.StatusConflict, + }.Check(t, s.Handler) + + // Do not merge commitments 
with states other than "active" + unmergableStates := []db.CommitmentState{db.CommitmentStatePlanned, db.CommitmentStatePending, db.CommitmentStateSuperseded, db.CommitmentStateExpired} + for _, state := range unmergableStates { + _, err := s.DB.Exec("UPDATE project_commitments SET state=$1 where ID = 2", state) + if err != nil { + t.Fatal(err) + } + assert.HTTPRequest{ + Method: http.MethodPost, + Path: "/v1/domains/uuid-for-germany/projects/uuid-for-berlin/commitments/merge", + Body: assert.JSONObject{"commitment_ids": []int{1, 2}}, + ExpectStatus: http.StatusConflict, + }.Check(t, s.Handler) + } + _, err := s.DB.Exec("UPDATE project_commitments SET state=$1 where ID = 2", db.CommitmentStateActive) + if err != nil { + t.Fatal(err) + } + + // Happy path + // New merged commitment should be returned with latest expiration date of all commitments + assert.HTTPRequest{ + Method: http.MethodPost, + Path: "/v1/domains/uuid-for-germany/projects/uuid-for-berlin/commitments/merge", + Body: assert.JSONObject{"commitment_ids": []int{1, 2}}, + ExpectBody: assert.JSONObject{"commitment": resp5}, + ExpectStatus: http.StatusAccepted, + }.Check(t, s.Handler) + // Check that commitments not involved in the merge remained the same + // Check that new merged commitment is present and that superseded commitments are not reported anymore + assert.HTTPRequest{ + Method: http.MethodGet, + Path: "/v1/domains/uuid-for-germany/projects/uuid-for-berlin/commitments", + ExpectBody: assert.JSONObject{"commitments": []assert.JSONObject{resp3, resp4, resp5}}, + ExpectStatus: http.StatusOK, + }.Check(t, s.Handler) + // Validate that commitments that were merged are now superseded and have the correct context + var supersededCommitment db.ProjectCommitment + err = s.DB.SelectOne(&supersededCommitment, `SELECT * FROM project_commitments where ID = 1`) + if err != nil { + t.Fatal(err) + } + assert.DeepEqual(t, "commitment state", supersededCommitment.State, db.CommitmentStateSuperseded) + 
expectedContext := db.CommitmentWorkflowContext{Reason: db.CommitmentReasonMerge, RelatedCommitmentIDs: []db.ProjectCommitmentID{5}} + var supersedeContext db.CommitmentWorkflowContext + err = json.Unmarshal(*supersededCommitment.SupersedeContextJSON, &supersedeContext) + if err != nil { + t.Fatal(err) + } + assert.DeepEqual(t, "commitment supersede context", supersedeContext, expectedContext) + err = s.DB.SelectOne(&supersededCommitment, `SELECT * FROM project_commitments where ID = 2`) + if err != nil { + t.Fatal(err) + } + assert.DeepEqual(t, "commitment state", supersededCommitment.State, db.CommitmentStateSuperseded) + err = json.Unmarshal(*supersededCommitment.SupersedeContextJSON, &supersedeContext) + if err != nil { + t.Fatal(err) + } + assert.DeepEqual(t, "commitment supersede context", supersedeContext, expectedContext) +} diff --git a/internal/api/core.go b/internal/api/core.go index a1ad3b878..260e8302b 100644 --- a/internal/api/core.go +++ b/internal/api/core.go @@ -162,6 +162,7 @@ func (p *v1Provider) AddTo(r *mux.Router) { r.Methods("GET").Path("/v1/domains/{domain_id}/projects/{project_id}/commitments").HandlerFunc(p.GetProjectCommitments) r.Methods("POST").Path("/v1/domains/{domain_id}/projects/{project_id}/commitments/new").HandlerFunc(p.CreateProjectCommitment) + r.Methods("POST").Path("/v1/domains/{domain_id}/projects/{project_id}/commitments/merge").HandlerFunc(p.MergeProjectCommitments) r.Methods("POST").Path("/v1/domains/{domain_id}/projects/{project_id}/commitments/can-confirm").HandlerFunc(p.CanConfirmNewProjectCommitment) r.Methods("DELETE").Path("/v1/domains/{domain_id}/projects/{project_id}/commitments/{id}").HandlerFunc(p.DeleteProjectCommitment) r.Methods("POST").Path("/v1/domains/{domain_id}/projects/{project_id}/commitments/{id}/start-transfer").HandlerFunc(p.StartCommitmentTransfer) diff --git a/internal/api/fixtures/start-data.sql b/internal/api/fixtures/start-data.sql index 9993ea1b6..2a0d39c03 100644 --- 
a/internal/api/fixtures/start-data.sql +++ b/internal/api/fixtures/start-data.sql @@ -114,18 +114,18 @@ INSERT INTO project_az_resources (id, resource_id, az, quota, usage, physical_us -- project_commitments has several entries for project dresden -- on "unshared/capacity": regular active commitments with different durations -INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state) VALUES (1, 17, 1, '2 years', UNIX(1), 'uuid-for-alice', 'alice@Default', UNIX(1), UNIX(1), UNIX(100000001), 'active'); -INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state) VALUES (2, 17, 1, '1 year', UNIX(2), 'uuid-for-alice', 'alice@Default', UNIX(2), UNIX(2), UNIX(100000002), 'active'); -INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state) VALUES (3, 17, 1, '1 year', UNIX(3), 'uuid-for-alice', 'alice@Default', UNIX(3), UNIX(3), UNIX(100000003), 'active'); -INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state) VALUES (4, 18, 2, '1 year', UNIX(4), 'uuid-for-alice', 'alice@Default', UNIX(4), UNIX(4), UNIX(100000004), 'active'); +INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state, creation_context_json) VALUES (1, 17, 1, '2 years', UNIX(1), 'uuid-for-alice', 'alice@Default', UNIX(1), UNIX(1), UNIX(100000001), 'active', '{}'::jsonb); +INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state, creation_context_json) VALUES (2, 17, 1, '1 year', UNIX(2), 'uuid-for-alice', 'alice@Default', 
UNIX(2), UNIX(2), UNIX(100000002), 'active', '{}'::jsonb); +INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state, creation_context_json) VALUES (3, 17, 1, '1 year', UNIX(3), 'uuid-for-alice', 'alice@Default', UNIX(3), UNIX(3), UNIX(100000003), 'active', '{}'::jsonb); +INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state, creation_context_json) VALUES (4, 18, 2, '1 year', UNIX(4), 'uuid-for-alice', 'alice@Default', UNIX(4), UNIX(4), UNIX(100000004), 'active', '{}'::jsonb); -- on "unshared/capacity": unconfirmed commitments should be reported as "pending" -INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state) VALUES (5, 18, 100, '2 years', UNIX(5), 'uuid-for-alice', 'alice@Default', UNIX(5), NULL, UNIX(100000005), 'pending'); +INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state, creation_context_json) VALUES (5, 18, 100, '2 years', UNIX(5), 'uuid-for-alice', 'alice@Default', UNIX(5), NULL, UNIX(100000005), 'pending', '{}'::jsonb); -- on "unshared/capacity": expired commitments should not be reported (NOTE: the test's clock stands at UNIX timestamp 3600) -INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state) VALUES (6, 17, 5, '10 minutes', UNIX(6), 'uuid-for-alice', 'alice@Default', UNIX(6), UNIX(6), UNIX(606), 'expired'); +INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state, creation_context_json) VALUES (6, 17, 5, '10 minutes', UNIX(6), 'uuid-for-alice', 
'alice@Default', UNIX(6), UNIX(6), UNIX(606), 'expired', '{}'::jsonb); -- on "shared/capacity": only an unconfirmed commitment that should be reported as "planned", this tests that the "committed" structure is absent in the JSON for that resource -INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state) VALUES (7, 24, 100, '2 years', UNIX(7), 'uuid-for-alice', 'alice@Default', UNIX(1000007), NULL, UNIX(100000007), 'planned'); +INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state, creation_context_json) VALUES (7, 24, 100, '2 years', UNIX(7), 'uuid-for-alice', 'alice@Default', UNIX(1000007), NULL, UNIX(100000007), 'planned', '{}'::jsonb); -- on "unshared/things": an active commitment on AZ "any" -INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state) VALUES (8, 15, 1, '2 years', UNIX(8), 'uuid-for-alice', 'alice@Default', UNIX(8), UNIX(8), UNIX(100000008), 'active'); +INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state, creation_context_json) VALUES (8, 15, 1, '2 years', UNIX(8), 'uuid-for-alice', 'alice@Default', UNIX(8), UNIX(8), UNIX(100000008), 'active', '{}'::jsonb); -- project_rates also has multiple different setups to test different cases -- berlin has custom rate limits diff --git a/internal/collector/capacity_scrape_test.go b/internal/collector/capacity_scrape_test.go index 0a5f6d55f..a774df581 100644 --- a/internal/collector/capacity_scrape_test.go +++ b/internal/collector/capacity_scrape_test.go @@ -656,8 +656,8 @@ func TestScanCapacityWithMailNotification(t *testing.T) { // (Commitment ID: 11) Confirmed commitment for first/capacity_portion in dresden 
az-one (amount = 1). _, err = s.DB.Exec(` INSERT INTO project_commitments - (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, notify_on_confirm) - VALUES(11, 27, 1, $1, 'dummy', 'dummy', $2, '2 days', $3, 'planned', true)`, s.Clock.Now(), s.Clock.Now().Add(12*time.Hour), s.Clock.Now().Add(48*time.Hour)) + (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, notify_on_confirm, creation_context_json) + VALUES(11, 27, 1, $1, 'dummy', 'dummy', $2, '2 days', $3, 'planned', true, '{}'::jsonb)`, s.Clock.Now(), s.Clock.Now().Add(12*time.Hour), s.Clock.Now().Add(48*time.Hour)) if err != nil { t.Fatal(err) } @@ -670,7 +670,7 @@ func TestScanCapacityWithMailNotification(t *testing.T) { tr.DBChanges().AssertEqualf(`%s UPDATE project_az_resources SET quota = 10 WHERE id = 18 AND resource_id = 2 AND az = 'az-one'; UPDATE project_commitments SET confirmed_at = %d, state = 'active', notify_on_confirm = TRUE WHERE id = 1 AND transfer_token = NULL; - INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state, notify_on_confirm) VALUES (11, 27, 1, '2 days', 10, 'dummy', 'dummy', 43210, 86420, 172810, 'active', TRUE); + INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirm_by, confirmed_at, expires_at, state, notify_on_confirm, creation_context_json) VALUES (11, 27, 1, '2 days', 10, 'dummy', 'dummy', 43210, 86420, 172810, 'active', TRUE, '{}'); INSERT INTO project_mail_notifications (id, project_id, subject, body, next_submission_at) VALUES (1, 1, 'Your recent commitment confirmations', 'Domain:germany Project:berlin Creator:dummy Amount:10 Duration:10 days Date:1970-01-02 Service:first Resource:capacity AZ:az-one', %[2]d); INSERT INTO project_mail_notifications (id, project_id, subject, body, 
next_submission_at) VALUES (2, 2, 'Your recent commitment confirmations', 'Domain:germany Project:dresden Creator:dummy Amount:1 Duration:2 days Date:1970-01-02 Service:second Resource:capacity AZ:az-one', %[3]d); UPDATE project_resources SET quota = 260 WHERE id = 2 AND service_id = 1 AND name = 'capacity'; @@ -680,15 +680,15 @@ func TestScanCapacityWithMailNotification(t *testing.T) { // (Commitment IDs: 12, 13) Confirmed commitment for first/capacity_portion in dresden az-one (amount = 1). _, err = s.DB.Exec(` INSERT INTO project_commitments - (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state, notify_on_confirm) - VALUES(12, 27, 1, $1, 'dummy', 'dummy', '2 days', $2, 'pending', true)`, s.Clock.Now(), s.Clock.Now().Add(48*time.Hour)) + (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state, notify_on_confirm, creation_context_json) + VALUES(12, 27, 1, $1, 'dummy', 'dummy', '2 days', $2, 'pending', true, '{}'::jsonb)`, s.Clock.Now(), s.Clock.Now().Add(48*time.Hour)) if err != nil { t.Fatal(err) } _, err = s.DB.Exec(` INSERT INTO project_commitments - (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state, notify_on_confirm) - VALUES(13, 27, 1, $1, 'dummy', 'dummy', '2 days', $2, 'pending', true)`, s.Clock.Now(), s.Clock.Now().Add(48*time.Hour)) + (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state, notify_on_confirm, creation_context_json) + VALUES(13, 27, 1, $1, 'dummy', 'dummy', '2 days', $2, 'pending', true, '{}'::jsonb)`, s.Clock.Now(), s.Clock.Now().Add(48*time.Hour)) if err != nil { t.Fatal(err) } @@ -700,8 +700,8 @@ func TestScanCapacityWithMailNotification(t *testing.T) { UPDATE project_az_resources SET quota = 7 WHERE id = 26 AND resource_id = 11 AND az = 'any'; UPDATE project_az_resources SET quota = 2 WHERE id = 27 AND resource_id = 11 AND az = 'az-one'; UPDATE project_commitments 
SET state = 'expired' WHERE id = 11 AND transfer_token = NULL; - INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirmed_at, expires_at, state, notify_on_confirm) VALUES (12, 27, 1, '2 days', 86420, 'dummy', 'dummy', 172830, 259220, 'active', TRUE); - INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirmed_at, expires_at, state, notify_on_confirm) VALUES (13, 27, 1, '2 days', 86420, 'dummy', 'dummy', 172830, 259220, 'active', TRUE); + INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirmed_at, expires_at, state, notify_on_confirm, creation_context_json) VALUES (12, 27, 1, '2 days', 86420, 'dummy', 'dummy', 172830, 259220, 'active', TRUE, '{}'); + INSERT INTO project_commitments (id, az_resource_id, amount, duration, created_at, creator_uuid, creator_name, confirmed_at, expires_at, state, notify_on_confirm, creation_context_json) VALUES (13, 27, 1, '2 days', 86420, 'dummy', 'dummy', 172830, 259220, 'active', TRUE, '{}'); UPDATE project_commitments SET confirmed_at = 172825, state = 'active' WHERE id = 2 AND transfer_token = NULL; UPDATE project_commitments SET state = 'pending' WHERE id = 3 AND transfer_token = NULL; INSERT INTO project_mail_notifications (id, project_id, subject, body, next_submission_at) VALUES (3, 2, 'Your recent commitment confirmations', 'Domain:germany Project:dresden Creator:dummy Amount:1 Duration:2 days Date:1970-01-03 Service:second Resource:capacity AZ:az-one Creator:dummy Amount:1 Duration:2 days Date:1970-01-03 Service:second Resource:capacity AZ:az-one', %d); diff --git a/internal/collector/commitment_cleanup.go b/internal/collector/commitment_cleanup.go index 49bdcd619..341520527 100644 --- a/internal/collector/commitment_cleanup.go +++ b/internal/collector/commitment_cleanup.go @@ -63,23 +63,14 @@ func (c *Collector) cleanupOldCommitments(_ 
context.Context, _ prometheus.Labels // step 2: delete expired commitments after a grace period // - // NOTE 1: Expired commitments do not contribute to any calculations, so it would + // NOTE: Expired commitments do not contribute to any calculations, so it would // be fine to delete them immediately from a technical perspective. However, // they don't take up that much space in the short run, and having them stick // around in the DB for a little bit (in this case, one month) can // potentially help in investigations when customers complain about // commitments expiring unexpectedly. - // - // NOTE 2: We cannot delete superseded commitments that have undeleted - // successors because the foreign key on project_commitments.predecessor_id - // is intentionally specified as `ON DELETE RESTRICT`. The cleanup query is - // written such that we have to cleanup the successors first, and then the - // predecessors can be deleted recursively. This does not happen all at once, - // but since the jobloop runs every few minutes, it happens quickly enough. 
query = sqlext.SimplifyWhitespace(` - DELETE FROM project_commitments pc WHERE expires_at + interval '1 month' <= $1 AND id NOT IN ( - SELECT DISTINCT predecessor_id FROM project_commitments WHERE predecessor_id IS NOT NULL - ) + DELETE FROM project_commitments pc WHERE expires_at + interval '1 month' <= $1 `) _, err = c.DB.Exec(query, now) if err != nil { diff --git a/internal/collector/commitment_cleanup_test.go b/internal/collector/commitment_cleanup_test.go index 20f247297..b980ab637 100644 --- a/internal/collector/commitment_cleanup_test.go +++ b/internal/collector/commitment_cleanup_test.go @@ -20,6 +20,7 @@ package collector import ( + "encoding/json" "testing" "time" @@ -71,28 +72,33 @@ func TestCleanupOldCommitmentsJob(t *testing.T) { mustT(t, err) // as a control group, this commitment will not expire for the entire duration of the test + creationContext := db.CommitmentWorkflowContext{Reason: db.CommitmentReasonCreate} + buf, err := json.Marshal(creationContext) + mustT(t, err) mustT(t, c.DB.Insert(&db.ProjectCommitment{ - ID: 1, - AZResourceID: 1, - Amount: 10, - Duration: commitmentForOneDay, - CreatedAt: s.Clock.Now(), - ConfirmedAt: pointerTo(s.Clock.Now()), - ExpiresAt: commitmentForThreeYears.AddTo(s.Clock.Now()), - State: db.CommitmentStateActive, + ID: 1, + AZResourceID: 1, + Amount: 10, + Duration: commitmentForOneDay, + CreatedAt: s.Clock.Now(), + ConfirmedAt: pointerTo(s.Clock.Now()), + ExpiresAt: commitmentForThreeYears.AddTo(s.Clock.Now()), + State: db.CommitmentStateActive, + CreationContextJSON: json.RawMessage(buf), })) // test 1: create an expired commitment s.Clock.StepBy(30 * oneDay) mustT(t, c.DB.Insert(&db.ProjectCommitment{ - ID: 2, - AZResourceID: 1, - Amount: 10, - Duration: commitmentForOneDay, - CreatedAt: s.Clock.Now().Add(-oneDay), - ConfirmedAt: pointerTo(s.Clock.Now().Add(-oneDay)), - ExpiresAt: s.Clock.Now(), - State: db.CommitmentStateActive, + ID: 2, + AZResourceID: 1, + Amount: 10, + Duration: commitmentForOneDay, + 
CreatedAt: s.Clock.Now().Add(-oneDay), + ConfirmedAt: pointerTo(s.Clock.Now().Add(-oneDay)), + ExpiresAt: s.Clock.Now(), + State: db.CommitmentStateActive, + CreationContextJSON: json.RawMessage(buf), })) tr.DBChanges().Ignore() @@ -110,28 +116,39 @@ func TestCleanupOldCommitmentsJob(t *testing.T) { tr.DBChanges().AssertEqualf(`DELETE FROM project_commitments WHERE id = 2 AND transfer_token = NULL;`) // test 2: simulate a commitment that was created yesterday, - // and then moved to a different project five minutes later + // and then converted five minutes later + creationContext = db.CommitmentWorkflowContext{Reason: db.CommitmentReasonCreate} + buf, err = json.Marshal(creationContext) + mustT(t, err) + supersedeContext := db.CommitmentWorkflowContext{Reason: db.CommitmentReasonConvert, RelatedCommitmentIDs: []db.ProjectCommitmentID{4}} + supersedeBuf, err := json.Marshal(supersedeContext) + mustT(t, err) mustT(t, c.DB.Insert(&db.ProjectCommitment{ - ID: 3, - AZResourceID: 1, - Amount: 10, - Duration: commitmentForOneDay, - CreatedAt: s.Clock.Now().Add(-oneDay), - ConfirmedAt: pointerTo(s.Clock.Now().Add(-oneDay)), - ExpiresAt: s.Clock.Now(), - SupersededAt: pointerTo(s.Clock.Now().Add(-oneDay).Add(5 * time.Minute)), - State: db.CommitmentStateSuperseded, + ID: 3, + AZResourceID: 1, + Amount: 10, + Duration: commitmentForOneDay, + CreatedAt: s.Clock.Now().Add(-oneDay), + ConfirmedAt: pointerTo(s.Clock.Now().Add(-oneDay)), + ExpiresAt: s.Clock.Now(), + SupersededAt: pointerTo(s.Clock.Now().Add(-oneDay).Add(5 * time.Minute)), + State: db.CommitmentStateSuperseded, + CreationContextJSON: json.RawMessage(buf), + SupersedeContextJSON: pointerTo(json.RawMessage(supersedeBuf)), })) + creationContext = db.CommitmentWorkflowContext{Reason: db.CommitmentReasonConvert, RelatedCommitmentIDs: []db.ProjectCommitmentID{3}} + buf, err = json.Marshal(creationContext) + mustT(t, err) mustT(t, c.DB.Insert(&db.ProjectCommitment{ - ID: 4, - AZResourceID: 2, - Amount: 10, - Duration: 
commitmentForOneDay, - CreatedAt: s.Clock.Now().Add(-oneDay).Add(5 * time.Minute), - ConfirmedAt: pointerTo(s.Clock.Now().Add(-oneDay)), - ExpiresAt: s.Clock.Now(), - State: db.CommitmentStateActive, - PredecessorID: pointerTo(db.ProjectCommitmentID(3)), + ID: 4, + AZResourceID: 2, + Amount: 10, + Duration: commitmentForOneDay, + CreatedAt: s.Clock.Now().Add(-oneDay).Add(5 * time.Minute), + ConfirmedAt: pointerTo(s.Clock.Now().Add(-oneDay)), + ExpiresAt: s.Clock.Now(), + State: db.CommitmentStateActive, + CreationContextJSON: json.RawMessage(buf), })) tr.DBChanges().Ignore() @@ -140,14 +157,74 @@ func TestCleanupOldCommitmentsJob(t *testing.T) { mustT(t, job.ProcessOne(s.Ctx)) tr.DBChanges().AssertEqualf(`UPDATE project_commitments SET state = 'expired' WHERE id = 4 AND transfer_token = NULL;`) - // when cleaning up, the successor commitment needs to be cleaned up first... + // when cleaning up, both commitments should be deleted simultaneously s.Clock.StepBy(40 * oneDay) mustT(t, job.ProcessOne(s.Ctx)) - tr.DBChanges().AssertEqualf(`DELETE FROM project_commitments WHERE id = 4 AND transfer_token = NULL;`) + tr.DBChanges().AssertEqualf(` + DELETE FROM project_commitments WHERE id = 3 AND transfer_token = NULL; + DELETE FROM project_commitments WHERE id = 4 AND transfer_token = NULL; + `) + + // test 3: simulate two commitments with different expiration dates that were merged + creationContext = db.CommitmentWorkflowContext{Reason: db.CommitmentReasonMerge, RelatedCommitmentIDs: []db.ProjectCommitmentID{7}} + buf, err = json.Marshal(creationContext) + mustT(t, err) + commitment5 := db.ProjectCommitment{ + ID: 5, + AZResourceID: 1, + Amount: 10, + Duration: commitmentForOneDay, + CreatedAt: s.Clock.Now().Add(-oneDay), + ConfirmedAt: pointerTo(s.Clock.Now().Add(-oneDay)), + ExpiresAt: s.Clock.Now(), + SupersededAt: pointerTo(s.Clock.Now().Add(-oneDay).Add(10 * time.Minute)), + State: db.CommitmentStateSuperseded, + CreationContextJSON: json.RawMessage(buf), + } + 
mustT(t, c.DB.Insert(&commitment5)) + commitment6 := db.ProjectCommitment{ + ID: 6, + AZResourceID: 1, + Amount: 5, + Duration: commitmentForOneDay, + CreatedAt: s.Clock.Now().Add(-oneDay).Add(5 * time.Minute), + ConfirmedAt: pointerTo(s.Clock.Now().Add(-oneDay).Add(5 * time.Minute)), + ExpiresAt: s.Clock.Now().Add(5 * time.Minute), + SupersededAt: pointerTo(s.Clock.Now().Add(-oneDay).Add(10 * time.Minute)), + State: db.CommitmentStateSuperseded, + CreationContextJSON: buf, + } + mustT(t, c.DB.Insert(&commitment6)) + creationContext = db.CommitmentWorkflowContext{Reason: db.CommitmentReasonMerge, RelatedCommitmentIDs: []db.ProjectCommitmentID{5, 6}} + buf, err = json.Marshal(creationContext) + mustT(t, err) + mustT(t, c.DB.Insert(&db.ProjectCommitment{ + ID: 7, + AZResourceID: 1, + Amount: 15, + Duration: commitmentForOneDay, + CreatedAt: s.Clock.Now().Add(-oneDay).Add(10 * time.Minute), + ConfirmedAt: pointerTo(s.Clock.Now().Add(-oneDay).Add(10 * time.Minute)), + ExpiresAt: s.Clock.Now().Add(5 * time.Minute), + State: db.CommitmentStateActive, + CreationContextJSON: json.RawMessage(buf), + })) + tr.DBChanges().Ignore() + + // only the merged commitment should be set to state expired, + // the superseded commitments should not be touched + s.Clock.StepBy(5 * time.Minute) + mustT(t, job.ProcessOne(s.Ctx)) + tr.DBChanges().AssertEqualf(`UPDATE project_commitments SET state = 'expired' WHERE id = 7 AND transfer_token = NULL;`) - // ...and then the superseded commitment can be cleaned up because it does not have predecessors left + // when cleaning up, all commitments related to the merge should be deleted simultaneously + s.Clock.StepBy(40 * oneDay) mustT(t, job.ProcessOne(s.Ctx)) - tr.DBChanges().AssertEqualf(`DELETE FROM project_commitments WHERE id = 3 AND transfer_token = NULL;`) + tr.DBChanges().AssertEqualf(` + DELETE FROM project_commitments WHERE id = 5 AND transfer_token = NULL; + DELETE FROM project_commitments WHERE id = 6 AND transfer_token = NULL; + 
DELETE FROM project_commitments WHERE id = 7 AND transfer_token = NULL; + `) } func pointerTo[T any](val T) *T { diff --git a/internal/collector/expiring_commitments_test.go b/internal/collector/expiring_commitments_test.go index a8d6ab726..052a663f8 100644 --- a/internal/collector/expiring_commitments_test.go +++ b/internal/collector/expiring_commitments_test.go @@ -77,7 +77,7 @@ func Test_ExpiringCommitmentNotification(t *testing.T) { originalMailTemplates := mailConfig.Templates mailConfig.Templates = core.MailTemplateConfiguration{ExpiringCommitments: core.MailTemplate{Compiled: template.New("")}} // commitments that are already sent out for a notification are not visible in the result set anymore - a new one gets created. - _, err := s.DB.Exec("INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state) VALUES (99, 1, 10, UNIX(0), 'dummy', 'dummy', '1 year', UNIX(0), 'expired');") + _, err := s.DB.Exec("INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state, creation_context_json) VALUES (99, 1, 10, UNIX(0), 'dummy', 'dummy', '1 year', UNIX(0), 'expired', '{}'::jsonb);") tr.DBChanges().Ignore() mustT(t, err) err = (job.ProcessOne(s.Ctx)) diff --git a/internal/collector/fixtures/capacity_scrape_with_commitments.sql b/internal/collector/fixtures/capacity_scrape_with_commitments.sql index 07d67e3db..388a569d8 100644 --- a/internal/collector/fixtures/capacity_scrape_with_commitments.sql +++ b/internal/collector/fixtures/capacity_scrape_with_commitments.sql @@ -90,24 +90,24 @@ INSERT INTO project_az_resources (id, resource_id, az, usage) VALUES (28, 11, 'a -- (the confirm_by and expires_at timestamps are all aligned on day boundaries, i.e. 
T = 86400 * N for some integer N) -- day 1: just a boring commitment that easily fits in the available capacity -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state) VALUES (1, 18, 10, UNIX(0), 'dummy', 'dummy', UNIX(86400), '10 days', UNIX(950400), 'planned'); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, creation_context_json) VALUES (1, 18, 10, UNIX(0), 'dummy', 'dummy', UNIX(86400), '10 days', UNIX(950400), 'planned', '{}'::jsonb); -- day 2: very large commitments that exceed the raw capacity; only the one on "first" works because that service has a large overcommit factor -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state) VALUES (2, 18, 100, UNIX(0), 'dummy', 'dummy', UNIX(172800), '10 days', UNIX(1036800), 'planned'); -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state) VALUES (3, 21, 100, UNIX(0), 'dummy', 'dummy', UNIX(172800), '10 days', UNIX(1036800), 'planned'); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, creation_context_json) VALUES (2, 18, 100, UNIX(0), 'dummy', 'dummy', UNIX(172800), '10 days', UNIX(1036800), 'planned', '{}'::jsonb); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, creation_context_json) VALUES (3, 21, 100, UNIX(0), 'dummy', 'dummy', UNIX(172800), '10 days', UNIX(1036800), 'planned', '{}'::jsonb); -- day 3: a bunch of small commitments with different timestamps, to test confirmation order in two ways: -- -- 1. 
ID=3 does not block these commitments even though it is on the same resource and AZ -- 2. we cannot confirm all of these; which ones are confirmed demonstrates the order of consideration -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state) VALUES (4, 27, 10, UNIX(1), 'dummy', 'dummy', UNIX(259202), '10 days', UNIX(1123200), 'planned'); -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state) VALUES (5, 27, 10, UNIX(2), 'dummy', 'dummy', UNIX(259201), '10 days', UNIX(1123200), 'planned'); -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state) VALUES (6, 27, 10, UNIX(3), 'dummy', 'dummy', UNIX(259200), '10 days', UNIX(1123200), 'planned'); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, creation_context_json) VALUES (4, 27, 10, UNIX(1), 'dummy', 'dummy', UNIX(259202), '10 days', UNIX(1123200), 'planned', '{}'::jsonb); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, creation_context_json) VALUES (5, 27, 10, UNIX(2), 'dummy', 'dummy', UNIX(259201), '10 days', UNIX(1123200), 'planned', '{}'::jsonb); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, creation_context_json) VALUES (6, 27, 10, UNIX(3), 'dummy', 'dummy', UNIX(259200), '10 days', UNIX(1123200), 'planned', '{}'::jsonb); -- day 4: test confirmation that is (or is not) blocked by existing usage in other projects (on a capacity of 420, there is already 250 usage in berlin, so only berlin can confirm a commitment for amount = 300, even though dresden asked first) -INSERT INTO 
project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state) VALUES (7, 25, 300, UNIX(1), 'dummy', 'dummy', UNIX(345600), '10 days', UNIX(1209600), 'planned'); -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state) VALUES (8, 19, 300, UNIX(2), 'dummy', 'dummy', UNIX(345600), '10 days', UNIX(1209600), 'planned'); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, creation_context_json) VALUES (7, 25, 300, UNIX(1), 'dummy', 'dummy', UNIX(345600), '10 days', UNIX(1209600), 'planned', '{}'::jsonb); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, creation_context_json) VALUES (8, 19, 300, UNIX(2), 'dummy', 'dummy', UNIX(345600), '10 days', UNIX(1209600), 'planned', '{}'::jsonb); -- day 5: test commitments that cannot be confirmed until the previous commitment expires (ID=9 is confirmed, and then ID=10 cannot be confirmed until ID=9 expires because ID=9 blocks absolutely all available capacity in that resource and AZ) -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state) VALUES (9, 22, 22, UNIX(1), 'dummy', 'dummy', UNIX(432000), '1 hour', UNIX(435600), 'planned'); -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state) VALUES (10, 28, 2, UNIX(2), 'dummy', 'dummy', UNIX(432000), '10 days', UNIX(1296000), 'planned'); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, creation_context_json) VALUES (9, 22, 22, UNIX(1), 'dummy', 'dummy', UNIX(432000), '1 hour', UNIX(435600), 
'planned', '{}'::jsonb); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, creation_context_json) VALUES (10, 28, 2, UNIX(2), 'dummy', 'dummy', UNIX(432000), '10 days', UNIX(1296000), 'planned', '{}'::jsonb); diff --git a/internal/collector/fixtures/mail_expiring_commitments.sql b/internal/collector/fixtures/mail_expiring_commitments.sql index 9840d625e..aa8ee2841 100644 --- a/internal/collector/fixtures/mail_expiring_commitments.sql +++ b/internal/collector/fixtures/mail_expiring_commitments.sql @@ -17,17 +17,17 @@ INSERT INTO project_az_resources (id, resource_id, az, usage) VALUES (3, 2, 'az INSERT INTO project_az_resources (id, resource_id, az, usage) VALUES (4, 2, 'az-two', 0); -- active/planned commitments should be ignored -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state) VALUES (1, 1, 10, UNIX(0), 'dummy', 'dummy', UNIX(86400), '1 year', UNIX(31536000), 'planned'); -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state) VALUES (2, 1, 10, UNIX(0), 'dummy', 'dummy', '1 year', UNIX(31536000), 'active'); -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state) VALUES (3, 1, 10, UNIX(0), 'dummy', 'dummy', UNIX(5097600), '10 days', UNIX(5875200), 'planned'); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, creation_context_json) VALUES (1, 1, 10, UNIX(0), 'dummy', 'dummy', UNIX(86400), '1 year', UNIX(31536000), 'planned', '{}'::jsonb); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state, creation_context_json) VALUES (2, 1, 10, UNIX(0), 'dummy', 'dummy', 
'1 year', UNIX(31536000), 'active', '{}'::jsonb); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, creation_context_json) VALUES (3, 1, 10, UNIX(0), 'dummy', 'dummy', UNIX(5097600), '10 days', UNIX(5875200), 'planned', '{}'::jsonb); -- expiring commitments for each project -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state) VALUES (4, 1, 5, UNIX(0), 'dummy', 'dummy', '1 year', UNIX(0), 'expired'); -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state) VALUES (5, 2, 10, UNIX(0), 'dummy', 'dummy', '1 year', UNIX(0), 'expired'); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state, creation_context_json) VALUES (4, 1, 5, UNIX(0), 'dummy', 'dummy', '1 year', UNIX(0), 'expired', '{}'::jsonb); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state, creation_context_json) VALUES (5, 2, 10, UNIX(0), 'dummy', 'dummy', '1 year', UNIX(0), 'expired', '{}'::jsonb); -- expiring commitments, marked as one year to make them pass the short-term commitment check, but they will expire within the scrape timeframe. 
-INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state) VALUES (6, 3, 5, UNIX(0), 'dummy', 'dummy', '1 year', UNIX(2246400), 'active'); -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state) VALUES (7, 4, 10, UNIX(0), 'dummy', 'dummy', '1 year', UNIX(2246400), 'active'); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state, creation_context_json) VALUES (6, 3, 5, UNIX(0), 'dummy', 'dummy', '1 year', UNIX(2246400), 'active', '{}'::jsonb); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, duration, expires_at, state, creation_context_json) VALUES (7, 4, 10, UNIX(0), 'dummy', 'dummy', '1 year', UNIX(2246400), 'active', '{}'::jsonb); -- expiring short-term commitments should not be queued and be marked as notified -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state) VALUES (8, 1, 10, UNIX(0), 'dummy', 'dummy', UNIX(86400), '10 days', UNIX(950400), 'planned'); -INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirmed_at, duration, expires_at, state) VALUES (9, 1, 10, UNIX(0), 'dummy', 'dummy', UNIX(0), '10 days', UNIX(777600), 'active'); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirm_by, duration, expires_at, state, creation_context_json) VALUES (8, 1, 10, UNIX(0), 'dummy', 'dummy', UNIX(86400), '10 days', UNIX(950400), 'planned', '{}'::jsonb); +INSERT INTO project_commitments (id, az_resource_id, amount, created_at, creator_uuid, creator_name, confirmed_at, duration, expires_at, state, creation_context_json) VALUES (9, 1, 10, UNIX(0), 'dummy', 'dummy', UNIX(0), '10 days', UNIX(777600), 
'active', '{}'::jsonb); diff --git a/internal/collector/scrape_test.go b/internal/collector/scrape_test.go index 9ecc7a239..ac6f8918e 100644 --- a/internal/collector/scrape_test.go +++ b/internal/collector/scrape_test.go @@ -21,6 +21,7 @@ package collector import ( "database/sql" + "encoding/json" "errors" "net/http" "regexp" @@ -298,41 +299,47 @@ func Test_ScrapeSuccess(t *testing.T) { mustT(t, err) now := s.Clock.Now() // AZResourceID = 2 has two commitments in state "active" to test summing by state + creationContext := db.CommitmentWorkflowContext{Reason: db.CommitmentReasonCreate} + buf, err := json.Marshal(creationContext) + mustT(t, err) for _, amount := range []uint64{7, 8} { mustT(t, s.DB.Insert(&db.ProjectCommitment{ - AZResourceID: 2, - Amount: amount, - Duration: commitmentForOneYear, - CreatedAt: now, - CreatorUUID: "dummy", - CreatorName: "dummy", - ConfirmedAt: &now, - ExpiresAt: commitmentForOneYear.AddTo(now), - State: db.CommitmentStateActive, + AZResourceID: 2, + Amount: amount, + Duration: commitmentForOneYear, + CreatedAt: now, + CreatorUUID: "dummy", + CreatorName: "dummy", + ConfirmedAt: &now, + ExpiresAt: commitmentForOneYear.AddTo(now), + State: db.CommitmentStateActive, + CreationContextJSON: buf, })) } // AZResourceID = 11 has two commitments in different states to test aggregation over different states mustT(t, s.DB.Insert(&db.ProjectCommitment{ - AZResourceID: 11, - Amount: 10, - Duration: commitmentForOneYear, - CreatedAt: now, - CreatorUUID: "dummy", - CreatorName: "dummy", - ConfirmedAt: &now, - ExpiresAt: commitmentForOneYear.AddTo(now), - State: db.CommitmentStateActive, + AZResourceID: 11, + Amount: 10, + Duration: commitmentForOneYear, + CreatedAt: now, + CreatorUUID: "dummy", + CreatorName: "dummy", + ConfirmedAt: &now, + ExpiresAt: commitmentForOneYear.AddTo(now), + State: db.CommitmentStateActive, + CreationContextJSON: buf, })) mustT(t, s.DB.Insert(&db.ProjectCommitment{ - AZResourceID: 11, - Amount: 10, - Duration: 
commitmentForOneYear, - CreatedAt: now, - CreatorUUID: "dummy", - CreatorName: "dummy", - ConfirmBy: &now, - ExpiresAt: commitmentForOneYear.AddTo(now), - State: db.CommitmentStatePending, + AZResourceID: 11, + Amount: 10, + Duration: commitmentForOneYear, + CreatedAt: now, + CreatorUUID: "dummy", + CreatorName: "dummy", + ConfirmBy: &now, + ExpiresAt: commitmentForOneYear.AddTo(now), + State: db.CommitmentStatePending, + CreationContextJSON: buf, })) // check data metrics generated by this scraping pass diff --git a/internal/db/migrations.go b/internal/db/migrations.go index 100fabf98..8bd09865b 100644 --- a/internal/db/migrations.go +++ b/internal/db/migrations.go @@ -216,4 +216,97 @@ var sqlMigrations = map[string]string{ failed_submissions BIGINT NOT NULL DEFAULT 0 ); `, + "050_commitment_workflow_context.down.sql": ` + -- We will probably not need this, no implementation for now + `, + "050_commitment_workflow_context.up.sql": ` + -- Step 1: Create new fields for commitment workflow contexts + ALTER TABLE project_commitments + ADD COLUMN creation_context_json JSONB, + ADD COLUMN supersede_context_json JSONB; + + -- Step 2: Populate creation context + WITH creation_context_data AS ( + SELECT pc.id as commitment_id, pc.predecessor_id, + CASE + WHEN pc.predecessor_id IS NULL THEN 'create' + WHEN EXISTS ( + SELECT 1 + FROM project_commitments pc2 + -- Since the az_resource_id can change if a commitment is transferred to a different project, + -- we need to join up to project_services and compare the service type and resource name + JOIN project_az_resources pc2_az_res ON pc2.az_resource_id = pc2_az_res.id + JOIN project_resources pc2_res ON pc2_az_res.resource_id = pc2_res.id + JOIN project_services pc2_srv ON pc2_res.service_id = pc2_srv.id + JOIN project_az_resources pc_az_res ON pc.az_resource_id = pc_az_res.id + JOIN project_resources pc_res ON pc_az_res.resource_id = pc_res.id + JOIN project_services pc_srv ON pc_res.service_id = pc_srv.id + WHERE pc2.id = 
pc.predecessor_id + AND pc2_res.name = pc_res.name + AND pc2_srv.type = pc_srv.type + ) THEN 'split' + ELSE 'convert' + END AS creation_reason + FROM project_commitments pc + ) + UPDATE project_commitments + SET creation_context_json = jsonb_build_object( + 'reason', creation_context_data.creation_reason, + 'related_ids', + CASE + WHEN creation_context_data.predecessor_id IS NULL THEN '[]'::jsonb + ELSE jsonb_build_array(creation_context_data.predecessor_id) + END + ) + FROM creation_context_data + WHERE project_commitments.id = creation_context_data.commitment_id; + + -- Step 3: Make creation context mandatory after populating with values + ALTER TABLE project_commitments + ALTER COLUMN creation_context_json SET NOT NULL; + + -- Step 4: Populate supersede context + WITH supersede_context_data AS ( + SELECT pc.id AS superseded_id, pc2.id AS successor_id, pc2.az_resource_id AS successor_az_resource_id, + CASE + WHEN EXISTS ( + SELECT 1 + FROM project_az_resources pc2_az_res + JOIN project_resources pc2_res ON pc2_az_res.resource_id = pc2_res.id + JOIN project_services pc2_srv ON pc2_res.service_id = pc2_srv.id + JOIN project_az_resources pc_az_res ON pc.az_resource_id = pc_az_res.id + JOIN project_resources pc_res ON pc_az_res.resource_id = pc_res.id + JOIN project_services pc_srv ON pc_res.service_id = pc_srv.id + WHERE pc2_az_res.id = pc2.az_resource_id + AND pc2_res.name = pc_res.name + AND pc2_srv.type = pc_srv.type + ) THEN 'split' + ELSE 'convert' + END AS supersede_reason + FROM project_commitments pc + JOIN project_commitments pc2 + ON pc.id = pc2.predecessor_id + WHERE pc.state = 'superseded' + ), + -- When splitting or during conversion, it is possible that two or more successor commitments are created + aggregated_successors AS ( + SELECT superseded_id, + jsonb_agg(successor_id) AS related_successors + FROM supersede_context_data + GROUP BY superseded_id + ) + UPDATE project_commitments p1 + SET supersede_context_json = jsonb_build_object( + 'reason', 
scd.supersede_reason, + 'related_ids', aggregated_successors.related_successors + ) + FROM supersede_context_data scd + JOIN aggregated_successors + ON scd.superseded_id = aggregated_successors.superseded_id + WHERE p1.id = scd.superseded_id; + + -- Step 5: Remove deprecated field predecessor_id + ALTER TABLE project_commitments + DROP COLUMN predecessor_id; + `, } diff --git a/internal/db/models.go b/internal/db/models.go index 28d8565b1..473cfca56 100644 --- a/internal/db/models.go +++ b/internal/db/models.go @@ -20,6 +20,7 @@ package db import ( + "encoding/json" "time" "github.com/go-gorp/gorp/v3" @@ -170,11 +171,11 @@ type ProjectCommitment struct { ConfirmedAt *time.Time `db:"confirmed_at"` ExpiresAt time.Time `db:"expires_at"` - // A commitment can be superseded e.g. by splitting it into smaller parts. - // When that happens, the new commitments will point to the one that they - // superseded through the PredecessorID field. - SupersededAt *time.Time `db:"superseded_at"` - PredecessorID *ProjectCommitmentID `db:"predecessor_id"` + // Commitments can be superseded due to splits, conversions or merges. + // The context columns contain information about the reason and related commitments + SupersededAt *time.Time `db:"superseded_at"` + CreationContextJSON json.RawMessage `db:"creation_context_json"` + SupersedeContextJSON *json.RawMessage `db:"supersede_context_json"` // For a commitment to be transferred between projects, it must first be // marked for transfer in the source project. Then a new commitment can be @@ -215,6 +216,23 @@ const ( CommitmentStateExpired CommitmentState = "expired" ) +// CommitmentWorkflowContext is the type definition for the JSON payload in the +// CreationContextJSON and SupersedeContextJSON fields of type ProjectCommitment. +type CommitmentWorkflowContext struct { + Reason CommitmentReason `json:"reason"` + RelatedCommitmentIDs []ProjectCommitmentID `json:"related_ids,omitempty"` +} + +// CommitmentReason is an enum. 
It appears in type CommitmentWorkflowContext. +type CommitmentReason string + +const ( + CommitmentReasonCreate CommitmentReason = "create" + CommitmentReasonSplit CommitmentReason = "split" + CommitmentReasonConvert CommitmentReason = "convert" + CommitmentReasonMerge CommitmentReason = "merge" +) + type MailNotification struct { ID int64 `db:"id"` ProjectID ProjectID `db:"project_id"` diff --git a/internal/test/setup.go b/internal/test/setup.go index 2e850b19a..62f7d22e5 100644 --- a/internal/test/setup.go +++ b/internal/test/setup.go @@ -36,7 +36,6 @@ import ( "github.com/sapcc/go-bits/logg" "github.com/sapcc/go-bits/mock" "github.com/sapcc/go-bits/osext" - "github.com/sapcc/go-bits/sqlext" "gopkg.in/yaml.v2" "github.com/sapcc/limes/internal/core" @@ -158,17 +157,9 @@ func NewSetup(t *testing.T, opts ...SetupOption) Setup { return s } -var cleanupProjectCommitmentsQuery = sqlext.SimplifyWhitespace(` - DELETE FROM project_commitments WHERE id NOT IN ( - SELECT predecessor_id FROM project_commitments WHERE predecessor_id IS NOT NULL - ) -`) - func initDatabase(t *testing.T, extraOpts []easypg.TestSetupOption) *gorp.DbMap { opts := append(slices.Clone(extraOpts), - // project_commitments needs a specialized cleanup strategy because of an "ON DELETE RESTRICT" constraint - easypg.ClearContentsWith(cleanupProjectCommitmentsQuery), - easypg.ClearTables("cluster_capacitors", "cluster_services", "domains"), + easypg.ClearTables("project_commitments", "cluster_capacitors", "cluster_services", "domains"), easypg.ResetPrimaryKeys( "cluster_services", "cluster_resources", "cluster_az_resources", "domains", "projects", "project_commitments", "project_mail_notifications",