
Commit 21e7a35

move buildworkspace -> apply
1 parent 5f6c030 commit 21e7a35

File tree

2 files changed: +109 -114 lines changed


provisionerd/runner/apply.go

Lines changed: 64 additions & 0 deletions
@@ -0,0 +1,64 @@
+package runner
+
+import (
+    "context"
+    "time"
+
+    "cdr.dev/slog"
+    "github.com/coder/coder/v2/provisionerd/proto"
+    sdkproto "github.com/coder/coder/v2/provisionersdk/proto"
+)
+
+func (r *Runner) apply(ctx context.Context, stage string, req *sdkproto.ApplyRequest) (
+    *sdkproto.ApplyComplete, *proto.FailedJob,
+) {
+    // use the notStopped so that if we attempt to gracefully cancel, the stream
+    // will still be available for us to send the cancel to the provisioner
+    err := r.session.Send(&sdkproto.Request{Type: &sdkproto.Request_Apply{Apply: req}})
+    if err != nil {
+        return nil, r.failedWorkspaceBuildf("start provision: %s", err)
+    }
+    nevermind := make(chan struct{})
+    defer close(nevermind)
+    go func() {
+        select {
+        case <-nevermind:
+            return
+        case <-r.notStopped.Done():
+            return
+        case <-r.notCanceled.Done():
+            _ = r.session.Send(&sdkproto.Request{
+                Type: &sdkproto.Request_Cancel{
+                    Cancel: &sdkproto.CancelRequest{},
+                },
+            })
+        }
+    }()
+
+    for {
+        msg, err := r.session.Recv()
+        if err != nil {
+            return nil, r.failedWorkspaceBuildf("recv workspace provision: %s", err)
+        }
+        switch msgType := msg.Type.(type) {
+        case *sdkproto.Response_Log:
+            r.logProvisionerJobLog(context.Background(), msgType.Log.Level, "workspace provisioner job logged",
+                slog.F("level", msgType.Log.Level),
+                slog.F("output", msgType.Log.Output),
+                slog.F("workspace_build_id", r.job.GetWorkspaceBuild().WorkspaceBuildId),
+            )
+
+            r.queueLog(ctx, &proto.Log{
+                Source: proto.LogSource_PROVISIONER,
+                Level: msgType.Log.Level,
+                CreatedAt: time.Now().UnixMilli(),
+                Output: msgType.Log.Output,
+                Stage: stage,
+            })
+        case *sdkproto.Response_Apply:
+            return msgType.Apply, nil
+        default:
+            return nil, r.failedJobf("unexpected plan response type %T", msg.Type)
+        }
+    }
+}
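
The new apply helper keeps the cancel-forwarding pattern from the old buildWorkspace: while a request is in flight on the provisioner stream, a side goroutine waits on the runner's cancellation contexts and forwards a cancel message on the same stream, unless the work finishes first (nevermind is closed) or the runner is hard-stopped (notStopped). The following is a minimal, self-contained sketch of that pattern, not the Runner code itself; fakeSession and the string messages are hypothetical stand-ins for the provisioner session and its protobuf requests.

package main

import (
    "context"
    "fmt"
)

// fakeSession stands in for the provisioner stream: Send forwards requests,
// and a forwarded "cancel" makes the provisioner wind down and reply.
type fakeSession struct {
    requests  chan string
    responses chan string
}

func (s *fakeSession) Send(msg string) error {
    s.requests <- msg
    if msg == "cancel" {
        s.responses <- "complete" // provisioner acknowledges the cancel by finishing
    }
    return nil
}

func (s *fakeSession) Recv() string { return <-s.responses }

func run(notStopped, notCanceled context.Context, sess *fakeSession) string {
    _ = sess.Send("apply")

    nevermind := make(chan struct{})
    defer close(nevermind) // silences the goroutine once the receive loop returns

    go func() {
        select {
        case <-nevermind:
            return // work finished; nothing to cancel
        case <-notStopped.Done():
            return // hard stop: the stream is going away anyway
        case <-notCanceled.Done():
            _ = sess.Send("cancel") // graceful cancel: ask the provisioner to stop
        }
    }()

    return sess.Recv() // block until the provisioner reports completion
}

func main() {
    sess := &fakeSession{requests: make(chan string, 2), responses: make(chan string, 1)}
    notCanceled, cancel := context.WithCancel(context.Background())
    cancel() // simulate a graceful cancel arriving while the apply is in flight

    fmt.Println(run(context.Background(), notCanceled, sess)) // prints "complete"
}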

provisionerd/runner/runner.go

Lines changed: 45 additions & 114 deletions
@@ -708,25 +708,6 @@ func (r *Runner) runTemplateImportProvisionWithRichParameters(
         stage = "Detecting ephemeral resources"
     }
 
-    // use the notStopped so that if we attempt to gracefully cancel, the stream will still be available for us
-    // to send the cancel to the provisioner
-    nevermind := make(chan struct{})
-    defer close(nevermind)
-    go func() {
-        select {
-        case <-nevermind:
-            return
-        case <-r.notStopped.Done():
-            return
-        case <-r.notCanceled.Done():
-            _ = r.session.Send(&sdkproto.Request{
-                Type: &sdkproto.Request_Cancel{
-                    Cancel: &sdkproto.CancelRequest{},
-                },
-            })
-        }
-    }()
-
     planComplete, failed := r.plan(ctx, stage, &sdkproto.PlanRequest{
         Metadata: metadata,
         RichParameterValues: richParameterValues,
@@ -840,63 +821,6 @@ func (r *Runner) runTemplateDryRun(ctx context.Context) (*proto.CompletedJob, *p
     }, nil
 }
 
-func (r *Runner) buildWorkspace(ctx context.Context, stage string, req *sdkproto.Request) (
-    *sdkproto.Response, *proto.FailedJob,
-) {
-    // use the notStopped so that if we attempt to gracefully cancel, the stream
-    // will still be available for us to send the cancel to the provisioner
-    err := r.session.Send(req)
-    if err != nil {
-        return nil, r.failedWorkspaceBuildf("start provision: %s", err)
-    }
-    nevermind := make(chan struct{})
-    defer close(nevermind)
-    go func() {
-        select {
-        case <-nevermind:
-            return
-        case <-r.notStopped.Done():
-            return
-        case <-r.notCanceled.Done():
-            _ = r.session.Send(&sdkproto.Request{
-                Type: &sdkproto.Request_Cancel{
-                    Cancel: &sdkproto.CancelRequest{},
-                },
-            })
-        }
-    }()
-
-    for {
-        msg, err := r.session.Recv()
-        if err != nil {
-            return nil, r.failedWorkspaceBuildf("recv workspace provision: %s", err)
-        }
-        switch msgType := msg.Type.(type) {
-        case *sdkproto.Response_Log:
-            r.logProvisionerJobLog(context.Background(), msgType.Log.Level, "workspace provisioner job logged",
-                slog.F("level", msgType.Log.Level),
-                slog.F("output", msgType.Log.Output),
-                slog.F("workspace_build_id", r.job.GetWorkspaceBuild().WorkspaceBuildId),
-            )
-
-            r.queueLog(ctx, &proto.Log{
-                Source: proto.LogSource_PROVISIONER,
-                Level: msgType.Log.Level,
-                CreatedAt: time.Now().UnixMilli(),
-                Output: msgType.Log.Output,
-                Stage: stage,
-            })
-        case *sdkproto.Response_DataUpload:
-            continue // Only for template imports
-        case *sdkproto.Response_ChunkPiece:
-            continue // Only for template imports
-        default:
-            // Stop looping!
-            return msg, nil
-        }
-    }
-}
-
 func (r *Runner) commitQuota(ctx context.Context, cost int32) *proto.FailedJob {
     r.logger.Debug(ctx, "committing quota",
         slog.F("cost", cost),
@@ -908,8 +832,7 @@ func (r *Runner) commitQuota(ctx context.Context, cost int32) *proto.FailedJob {
     const stage = "Commit quota"
 
     resp, err := r.quotaCommitter.CommitQuota(ctx, &proto.CommitQuotaRequest{
-        JobId: r.job.JobId,
-        // #nosec G115 - Safe conversion as cost is expected to be within int32 range for provisioning costs
+        JobId:     r.job.JobId,
         DailyCost: cost,
     })
     if err != nil {
@@ -978,6 +901,9 @@ func (r *Runner) runWorkspaceBuild(ctx context.Context) (*proto.CompletedJob, *p
         return nil, failedJob
     }
 
+    // timings collects all timings from each phase of the build
+    timings := make([]*sdkproto.Timing, 0)
+
     // Initialize the Terraform working directory
     initComplete, failedJob := r.init(ctx, true, r.job.GetTemplateSourceArchive())
     if failedJob != nil {
@@ -986,16 +912,21 @@
     if initComplete == nil {
         return nil, r.failedWorkspaceBuildf("invalid message type received from provisioner during init")
     }
+    // Collect init timings
+    timings = append(timings, initComplete.Timings...)
     if initComplete.Error != "" {
-        r.logger.Warn(context.Background(), "plan request failed",
+        r.logger.Warn(context.Background(), "init request failed",
             slog.F("error", initComplete.Error),
         )
 
         return nil, &proto.FailedJob{
            JobId: r.job.JobId,
            Error: initComplete.Error,
            Type: &proto.FailedJob_WorkspaceBuild_{
-                WorkspaceBuild: &proto.FailedJob_WorkspaceBuild{},
+                WorkspaceBuild: &proto.FailedJob_WorkspaceBuild{
+                    State:   r.job.GetWorkspaceBuild().State,
+                    Timings: timings,
+                },
             },
         }
     }
@@ -1015,6 +946,8 @@ func (r *Runner) runWorkspaceBuild(ctx context.Context) (*proto.CompletedJob, *p
     if planComplete == nil {
         return nil, r.failedWorkspaceBuildf("invalid message type received from provisioner during plan")
     }
+    // Collect plan timings
+    timings = append(timings, planComplete.Timings...)
     if planComplete.Error != "" {
         r.logger.Warn(context.Background(), "plan request failed",
             slog.F("error", planComplete.Error),
@@ -1024,7 +957,9 @@ func (r *Runner) runWorkspaceBuild(ctx context.Context) (*proto.CompletedJob, *p
            JobId: r.job.JobId,
            Error: planComplete.Error,
            Type: &proto.FailedJob_WorkspaceBuild_{
-                WorkspaceBuild: &proto.FailedJob_WorkspaceBuild{},
+                WorkspaceBuild: &proto.FailedJob_WorkspaceBuild{
+                    Timings: timings,
+                },
            },
        }
    }
@@ -1051,19 +986,33 @@ func (r *Runner) runWorkspaceBuild(ctx context.Context) (*proto.CompletedJob, *p
         CreatedAt: time.Now().UnixMilli(),
     })
 
-    resp, failed := r.buildWorkspace(ctx, applyStage, &sdkproto.Request{
-        Type: &sdkproto.Request_Apply{
-            Apply: &sdkproto.ApplyRequest{
-                Metadata: r.job.GetWorkspaceBuild().Metadata,
-            },
-        },
+    applyComplete, failed := r.apply(ctx, applyStage, &sdkproto.ApplyRequest{
+        Metadata: r.job.GetWorkspaceBuild().Metadata,
     })
     if failed != nil {
         return nil, failed
     }
-    applyComplete := resp.GetApply()
     if applyComplete == nil {
-        return nil, r.failedWorkspaceBuildf("invalid message type %T received from provisioner", resp.Type)
+        return nil, r.failedWorkspaceBuildf("invalid message type received from provisioner during apply")
+    }
+    // Collect apply timings
+    timings = append(timings, applyComplete.Timings...)
+    if applyComplete.Error != "" {
+        r.logger.Warn(context.Background(), "apply failed; updating state",
+            slog.F("error", applyComplete.Error),
+            slog.F("state_len", len(applyComplete.State)),
+        )
+
+        return nil, &proto.FailedJob{
+            JobId: r.job.JobId,
+            Error: applyComplete.Error,
+            Type: &proto.FailedJob_WorkspaceBuild_{
+                WorkspaceBuild: &proto.FailedJob_WorkspaceBuild{
+                    State:   applyComplete.State,
+                    Timings: timings,
+                },
+            },
+        }
     }
 
     // Run Terraform Graph
@@ -1075,8 +1024,10 @@ func (r *Runner) runWorkspaceBuild(ctx context.Context) (*proto.CompletedJob, *p
         return nil, failed
     }
     if graphComplete == nil {
-        return nil, r.failedWorkspaceBuildf("invalid message type %T received from provisioner", resp.Type)
+        return nil, r.failedWorkspaceBuildf("invalid message type received from provisioner during graph")
     }
+    // Collect graph timings
+    timings = append(timings, graphComplete.Timings...)
     if graphComplete.Error != "" {
         r.logger.Warn(context.Background(), "graph request failed",
             slog.F("error", planComplete.Error),
@@ -1085,31 +1036,9 @@ func (r *Runner) runWorkspaceBuild(ctx context.Context) (*proto.CompletedJob, *p
         return nil, &proto.FailedJob{
            JobId: r.job.JobId,
            Error: graphComplete.Error,
-            Type: &proto.FailedJob_WorkspaceBuild_{
-                WorkspaceBuild: &proto.FailedJob_WorkspaceBuild{},
-            },
-        }
-    }
-
-    // Prepend the plan timings (since they occurred first).
-    // Build up the full timing list in chronological order.
-    timings := make([]*sdkproto.Timing, 0, len(initComplete.Timings)+len(planComplete.Timings)+len(applyComplete.Timings)+len(graphComplete.Timings))
-    timings = append(timings, initComplete.Timings...)
-    timings = append(timings, planComplete.Timings...)
-    timings = append(timings, applyComplete.Timings...)
-    timings = append(timings, graphComplete.Timings...)
-
-    if applyComplete.Error != "" {
-        r.logger.Warn(context.Background(), "apply failed; updating state",
-            slog.F("error", applyComplete.Error),
-            slog.F("state_len", len(applyComplete.State)),
-        )
-
-        return nil, &proto.FailedJob{
-            JobId: r.job.JobId,
-            Error: applyComplete.Error,
            Type: &proto.FailedJob_WorkspaceBuild_{
                WorkspaceBuild: &proto.FailedJob_WorkspaceBuild{
+                    // Graph does not change the state, so return the state returned from apply.
                    State: applyComplete.State,
                    Timings: timings,
                },
@@ -1131,7 +1060,9 @@ func (r *Runner) runWorkspaceBuild(ctx context.Context) (*proto.CompletedJob, *p
         State:     applyComplete.State,
         Resources: graphComplete.Resources,
         Timings:   timings,
-        Modules:   initComplete.Modules,
+        // Modules files are omitted for workspace builds, but the modules.json metadata
+        // is available from init to return.
+        Modules: initComplete.Modules,
         // Resource replacements are discovered at plan time, only.
         ResourceReplacements: planComplete.ResourceReplacements,
         AiTasks:              graphComplete.AiTasks,
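
Taken together, the runner.go hunks also change when timings are collected: instead of assembling the full slice only after graph completes, runWorkspaceBuild now appends each phase's timings as soon as that phase returns, so a failure during init, plan, apply, or graph still reports whatever was measured up to that point (and, for init and apply failures, the relevant state). A rough illustration of that accumulation pattern follows; it is not the Runner code and uses simplified stand-in types (Timing, phaseResult, runPhase) rather than the provisioner SDK types.

package main

import (
    "errors"
    "fmt"
)

// Timing is a simplified stand-in for *sdkproto.Timing.
type Timing struct{ Stage string }

// phaseResult mimics the Complete message returned by each build phase.
type phaseResult struct {
    timings []Timing
    err     error
}

func runPhase(name string, fail bool) phaseResult {
    res := phaseResult{timings: []Timing{{Stage: name}}}
    if fail {
        res.err = errors.New(name + " failed")
    }
    return res
}

func build() ([]Timing, error) {
    // Collect timings incrementally so partial data survives a mid-build failure.
    timings := make([]Timing, 0)

    steps := []struct {
        name string
        fail bool
    }{
        {"init", false},
        {"plan", false},
        {"apply", true}, // simulate the apply phase failing
        {"graph", false},
    }
    for _, step := range steps {
        res := runPhase(step.name, step.fail)
        timings = append(timings, res.timings...) // keep whatever the phase measured
        if res.err != nil {
            // A failed build can still report the init, plan, and apply timings.
            return timings, res.err
        }
    }
    return timings, nil
}

func main() {
    timings, err := build()
    fmt.Println(err)     // apply failed
    fmt.Println(timings) // [{init} {plan} {apply}]
}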
