diff --git a/.gitattributes b/.gitattributes index ca878291fe0b5..003a35b526213 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,5 @@ # Generated files +agent/agentcontainers/acmock/acmock.go linguist-generated=true coderd/apidoc/docs.go linguist-generated=true docs/reference/api/*.md linguist-generated=true docs/reference/cli/*.md linguist-generated=true diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 7e1d811e08185..64059f413f5ad 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -961,6 +961,15 @@ jobs: - name: Setup Go uses: ./.github/actions/setup-go + # Needed to build dylibs. + - name: go install tools + run: | + go install google.golang.org/protobuf/cmd/protoc-gen-go@v1.30 + go install storj.io/drpc/cmd/protoc-gen-go-drpc@v0.0.34 + go install golang.org/x/tools/cmd/goimports@latest + go install github.com/mikefarah/yq/v4@v4.44.3 + go install go.uber.org/mock/mockgen@v0.5.0 + - name: Install rcodesign if: ${{ github.repository_owner == 'coder' && github.ref == 'refs/heads/main' }} run: | diff --git a/Makefile b/Makefile index d71b1173f36b7..fe553324cd339 100644 --- a/Makefile +++ b/Makefile @@ -563,7 +563,8 @@ GEN_FILES := \ site/e2e/provisionerGenerated.ts \ examples/examples.gen.json \ $(TAILNETTEST_MOCKS) \ - coderd/database/pubsub/psmock/psmock.go + coderd/database/pubsub/psmock/psmock.go \ + agent/agentcontainers/acmock/acmock.go # all gen targets should be added here and to gen/mark-fresh @@ -629,6 +630,9 @@ coderd/database/dbmock/dbmock.go: coderd/database/db.go coderd/database/querier. coderd/database/pubsub/psmock/psmock.go: coderd/database/pubsub/pubsub.go go generate ./coderd/database/pubsub/psmock +agent/agentcontainers/acmock/acmock.go: agent/agentcontainers/containers.go + go generate ./agent/agentcontainers/acmock/ + $(TAILNETTEST_MOCKS): tailnet/coordinator.go tailnet/service.go go generate ./tailnet/tailnettest/ diff --git a/agent/agent.go b/agent/agent.go index 2daba701b4e89..cfaa0a6e638ee 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -33,6 +33,7 @@ import ( "tailscale.com/util/clientmetric" "cdr.dev/slog" + "github.com/coder/coder/v2/agent/agentcontainers" "github.com/coder/coder/v2/agent/agentexec" "github.com/coder/coder/v2/agent/agentscripts" "github.com/coder/coder/v2/agent/agentssh" @@ -82,6 +83,7 @@ type Options struct { ServiceBannerRefreshInterval time.Duration BlockFileTransfer bool Execer agentexec.Execer + ContainerLister agentcontainers.Lister } type Client interface { @@ -122,7 +124,7 @@ func New(options Options) Agent { options.ScriptDataDir = options.TempDir } if options.ExchangeToken == nil { - options.ExchangeToken = func(ctx context.Context) (string, error) { + options.ExchangeToken = func(_ context.Context) (string, error) { return "", nil } } @@ -144,6 +146,9 @@ func New(options Options) Agent { if options.Execer == nil { options.Execer = agentexec.DefaultExecer } + if options.ContainerLister == nil { + options.ContainerLister = agentcontainers.NewDocker(options.Execer) + } hardCtx, hardCancel := context.WithCancel(context.Background()) gracefulCtx, gracefulCancel := context.WithCancel(hardCtx) @@ -178,6 +183,7 @@ func New(options Options) Agent { prometheusRegistry: prometheusRegistry, metrics: newAgentMetrics(prometheusRegistry), execer: options.Execer, + lister: options.ContainerLister, } // Initially, we have a closed channel, reflecting the fact that we are not initially connected. 
// Each time we connect we replace the channel (while holding the closeMutex) with a new one @@ -247,6 +253,7 @@ type agent struct { // labeled in Coder with the agent + workspace. metrics *agentMetrics execer agentexec.Execer + lister agentcontainers.Lister } func (a *agent) TailnetConn() *tailnet.Conn { diff --git a/agent/agentcontainers/acmock/acmock.go b/agent/agentcontainers/acmock/acmock.go new file mode 100644 index 0000000000000..93c84e8c54fd3 --- /dev/null +++ b/agent/agentcontainers/acmock/acmock.go @@ -0,0 +1,57 @@ +// Code generated by MockGen. DO NOT EDIT. +// Source: .. (interfaces: Lister) +// +// Generated by this command: +// +// mockgen -destination ./acmock.go -package acmock .. Lister +// + +// Package acmock is a generated GoMock package. +package acmock + +import ( + context "context" + reflect "reflect" + + codersdk "github.com/coder/coder/v2/codersdk" + gomock "go.uber.org/mock/gomock" +) + +// MockLister is a mock of Lister interface. +type MockLister struct { + ctrl *gomock.Controller + recorder *MockListerMockRecorder + isgomock struct{} +} + +// MockListerMockRecorder is the mock recorder for MockLister. +type MockListerMockRecorder struct { + mock *MockLister +} + +// NewMockLister creates a new mock instance. +func NewMockLister(ctrl *gomock.Controller) *MockLister { + mock := &MockLister{ctrl: ctrl} + mock.recorder = &MockListerMockRecorder{mock} + return mock +} + +// EXPECT returns an object that allows the caller to indicate expected use. +func (m *MockLister) EXPECT() *MockListerMockRecorder { + return m.recorder +} + +// List mocks base method. +func (m *MockLister) List(ctx context.Context) (codersdk.WorkspaceAgentListContainersResponse, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "List", ctx) + ret0, _ := ret[0].(codersdk.WorkspaceAgentListContainersResponse) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// List indicates an expected call of List. +func (mr *MockListerMockRecorder) List(ctx any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "List", reflect.TypeOf((*MockLister)(nil).List), ctx) +} diff --git a/agent/agentcontainers/acmock/doc.go b/agent/agentcontainers/acmock/doc.go new file mode 100644 index 0000000000000..47679708b0fc8 --- /dev/null +++ b/agent/agentcontainers/acmock/doc.go @@ -0,0 +1,4 @@ +// Package acmock contains a mock implementation of agentcontainers.Lister for use in tests. +package acmock + +//go:generate mockgen -destination ./acmock.go -package acmock .. Lister diff --git a/agent/agentcontainers/containers.go b/agent/agentcontainers/containers.go new file mode 100644 index 0000000000000..8578f03337fbe --- /dev/null +++ b/agent/agentcontainers/containers.go @@ -0,0 +1,142 @@ +package agentcontainers + +import ( + "context" + "errors" + "net/http" + "slices" + "time" + + "golang.org/x/xerrors" + + "github.com/coder/coder/v2/coderd/httpapi" + "github.com/coder/coder/v2/codersdk" + "github.com/coder/quartz" +) + +const ( + defaultGetContainersCacheDuration = 10 * time.Second + dockerCreatedAtTimeFormat = "2006-01-02 15:04:05 -0700 MST" + getContainersTimeout = 5 * time.Second +) + +type devcontainersHandler struct { + cacheDuration time.Duration + cl Lister + clock quartz.Clock + + // lockCh protects the below fields. We use a channel instead of a mutex so we + // can handle cancellation properly. + lockCh chan struct{} + containers *codersdk.WorkspaceAgentListContainersResponse + mtime time.Time +} + +// Option is a functional option for devcontainersHandler. 
+type Option func(*devcontainersHandler) + +// WithLister sets the agentcontainers.Lister implementation to use. +// The default implementation uses the Docker CLI to list containers. +func WithLister(cl Lister) Option { + return func(ch *devcontainersHandler) { + ch.cl = cl + } +} + +// New returns a new devcontainersHandler with the given options applied. +func New(options ...Option) http.Handler { + ch := &devcontainersHandler{ + lockCh: make(chan struct{}, 1), + } + for _, opt := range options { + opt(ch) + } + return ch +} + +func (ch *devcontainersHandler) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + select { + case <-r.Context().Done(): + // Client went away. + return + default: + ct, err := ch.getContainers(r.Context()) + if err != nil { + if errors.Is(err, context.Canceled) { + httpapi.Write(r.Context(), rw, http.StatusRequestTimeout, codersdk.Response{ + Message: "Could not get containers.", + Detail: "Took too long to list containers.", + }) + return + } + httpapi.Write(r.Context(), rw, http.StatusInternalServerError, codersdk.Response{ + Message: "Could not get containers.", + Detail: err.Error(), + }) + return + } + + httpapi.Write(r.Context(), rw, http.StatusOK, ct) + } +} + +func (ch *devcontainersHandler) getContainers(ctx context.Context) (codersdk.WorkspaceAgentListContainersResponse, error) { + select { + case <-ctx.Done(): + return codersdk.WorkspaceAgentListContainersResponse{}, ctx.Err() + default: + ch.lockCh <- struct{}{} + } + defer func() { + <-ch.lockCh + }() + + // make zero-value usable + if ch.cacheDuration == 0 { + ch.cacheDuration = defaultGetContainersCacheDuration + } + if ch.cl == nil { + ch.cl = &DockerCLILister{} + } + if ch.containers == nil { + ch.containers = &codersdk.WorkspaceAgentListContainersResponse{} + } + if ch.clock == nil { + ch.clock = quartz.NewReal() + } + + now := ch.clock.Now() + if now.Sub(ch.mtime) < ch.cacheDuration { + // Return a copy of the cached data to avoid accidental modification by the caller. + cpy := codersdk.WorkspaceAgentListContainersResponse{ + Containers: slices.Clone(ch.containers.Containers), + Warnings: slices.Clone(ch.containers.Warnings), + } + return cpy, nil + } + + timeoutCtx, timeoutCancel := context.WithTimeout(ctx, getContainersTimeout) + defer timeoutCancel() + updated, err := ch.cl.List(timeoutCtx) + if err != nil { + return codersdk.WorkspaceAgentListContainersResponse{}, xerrors.Errorf("get containers: %w", err) + } + ch.containers = &updated + ch.mtime = now + + // Return a copy of the cached data to avoid accidental modification by the + // caller. + cpy := codersdk.WorkspaceAgentListContainersResponse{ + Containers: slices.Clone(ch.containers.Containers), + Warnings: slices.Clone(ch.containers.Warnings), + } + return cpy, nil +} + +// Lister is an interface for listing containers visible to the +// workspace agent. +type Lister interface { + // List returns a list of containers visible to the workspace agent. + // This should include running and stopped containers. 
+ List(ctx context.Context) (codersdk.WorkspaceAgentListContainersResponse, error) +} diff --git a/agent/agentcontainers/containers_dockercli.go b/agent/agentcontainers/containers_dockercli.go new file mode 100644 index 0000000000000..e7364125b8e0f --- /dev/null +++ b/agent/agentcontainers/containers_dockercli.go @@ -0,0 +1,228 @@ +package agentcontainers + +import ( + "bufio" + "bytes" + "context" + "encoding/json" + "fmt" + "sort" + "strconv" + "strings" + "time" + + "github.com/coder/coder/v2/agent/agentexec" + "github.com/coder/coder/v2/codersdk" + + "golang.org/x/exp/maps" + "golang.org/x/xerrors" +) + +// DockerCLILister is a ContainerLister that lists containers using the docker CLI +type DockerCLILister struct { + execer agentexec.Execer +} + +var _ Lister = &DockerCLILister{} + +func NewDocker(execer agentexec.Execer) Lister { + return &DockerCLILister{ + execer: agentexec.DefaultExecer, + } +} + +func (dcl *DockerCLILister) List(ctx context.Context) (codersdk.WorkspaceAgentListContainersResponse, error) { + var stdoutBuf, stderrBuf bytes.Buffer + // List all container IDs, one per line, with no truncation + cmd := dcl.execer.CommandContext(ctx, "docker", "ps", "--all", "--quiet", "--no-trunc") + cmd.Stdout = &stdoutBuf + cmd.Stderr = &stderrBuf + if err := cmd.Run(); err != nil { + // TODO(Cian): detect specific errors: + // - docker not installed + // - docker not running + // - no permissions to talk to docker + return codersdk.WorkspaceAgentListContainersResponse{}, xerrors.Errorf("run docker ps: %w: %q", err, strings.TrimSpace(stderrBuf.String())) + } + + ids := make([]string, 0) + scanner := bufio.NewScanner(&stdoutBuf) + for scanner.Scan() { + tmp := strings.TrimSpace(scanner.Text()) + if tmp == "" { + continue + } + ids = append(ids, tmp) + } + if err := scanner.Err(); err != nil { + return codersdk.WorkspaceAgentListContainersResponse{}, xerrors.Errorf("scan docker ps output: %w", err) + } + + dockerPsStderr := strings.TrimSpace(stderrBuf.String()) + + // now we can get the detailed information for each container + // Run `docker inspect` on each container ID + stdoutBuf.Reset() + stderrBuf.Reset() + // nolint: gosec // We are not executing user input, these IDs come from + // `docker ps`. + cmd = dcl.execer.CommandContext(ctx, "docker", append([]string{"inspect"}, ids...)...) + cmd.Stdout = &stdoutBuf + cmd.Stderr = &stderrBuf + if err := cmd.Run(); err != nil { + return codersdk.WorkspaceAgentListContainersResponse{}, xerrors.Errorf("run docker inspect: %w: %s", err, strings.TrimSpace(stderrBuf.String())) + } + + dockerInspectStderr := strings.TrimSpace(stderrBuf.String()) + + // NOTE: There is an unavoidable potential race condition where a + // container is removed between `docker ps` and `docker inspect`. + // In this case, stderr will contain an error message but stdout + // will still contain valid JSON. We will just end up missing + // information about the removed container. We could potentially + // log this error, but I'm not sure it's worth it. + ins := make([]dockerInspect, 0, len(ids)) + if err := json.NewDecoder(&stdoutBuf).Decode(&ins); err != nil { + // However, if we just get invalid JSON, we should absolutely return an error. 
+ return codersdk.WorkspaceAgentListContainersResponse{}, xerrors.Errorf("decode docker inspect output: %w", err) + } + + res := codersdk.WorkspaceAgentListContainersResponse{ + Containers: make([]codersdk.WorkspaceAgentDevcontainer, len(ins)), + } + for idx, in := range ins { + out, warns := convertDockerInspect(in) + res.Warnings = append(res.Warnings, warns...) + res.Containers[idx] = out + } + + if dockerPsStderr != "" { + res.Warnings = append(res.Warnings, dockerPsStderr) + } + if dockerInspectStderr != "" { + res.Warnings = append(res.Warnings, dockerInspectStderr) + } + + return res, nil +} + +// To avoid a direct dependency on the Docker API, we use the docker CLI +// to fetch information about containers. +type dockerInspect struct { + ID string `json:"Id"` + Created time.Time `json:"Created"` + Config dockerInspectConfig `json:"Config"` + HostConfig dockerInspectHostConfig `json:"HostConfig"` + Name string `json:"Name"` + Mounts []dockerInspectMount `json:"Mounts"` + State dockerInspectState `json:"State"` +} + +type dockerInspectConfig struct { + Image string `json:"Image"` + Labels map[string]string `json:"Labels"` +} + +type dockerInspectHostConfig struct { + PortBindings map[string]any `json:"PortBindings"` +} + +type dockerInspectMount struct { + Source string `json:"Source"` + Destination string `json:"Destination"` + Type string `json:"Type"` +} + +type dockerInspectState struct { + Running bool `json:"Running"` + ExitCode int `json:"ExitCode"` + Error string `json:"Error"` +} + +func (dis dockerInspectState) String() string { + if dis.Running { + return "running" + } + var sb strings.Builder + _, _ = sb.WriteString("exited") + if dis.ExitCode != 0 { + _, _ = sb.WriteString(fmt.Sprintf(" with code %d", dis.ExitCode)) + } else { + _, _ = sb.WriteString(" successfully") + } + if dis.Error != "" { + _, _ = sb.WriteString(fmt.Sprintf(": %s", dis.Error)) + } + return sb.String() +} + +func convertDockerInspect(in dockerInspect) (codersdk.WorkspaceAgentDevcontainer, []string) { + var warns []string + out := codersdk.WorkspaceAgentDevcontainer{ + CreatedAt: in.Created, + // Remove the leading slash from the container name + FriendlyName: strings.TrimPrefix(in.Name, "/"), + ID: in.ID, + Image: in.Config.Image, + Labels: in.Config.Labels, + Ports: make([]codersdk.WorkspaceAgentListeningPort, 0), + Running: in.State.Running, + Status: in.State.String(), + Volumes: make(map[string]string, len(in.Mounts)), + } + + if in.HostConfig.PortBindings == nil { + in.HostConfig.PortBindings = make(map[string]any) + } + portKeys := maps.Keys(in.HostConfig.PortBindings) + // Sort the ports for deterministic output. + sort.Strings(portKeys) + for _, p := range portKeys { + if port, network, err := convertDockerPort(p); err != nil { + warns = append(warns, err.Error()) + } else { + out.Ports = append(out.Ports, codersdk.WorkspaceAgentListeningPort{ + Network: network, + Port: port, + }) + } + } + + if in.Mounts == nil { + in.Mounts = []dockerInspectMount{} + } + // Sort the mounts for deterministic output. 
+ sort.Slice(in.Mounts, func(i, j int) bool { + return in.Mounts[i].Source < in.Mounts[j].Source + }) + for _, k := range in.Mounts { + out.Volumes[k.Source] = k.Destination + } + + return out, warns +} + +// convertDockerPort converts a Docker port string to a port number and network +// example: "8080/tcp" -> 8080, "tcp" +// +// "8080" -> 8080, "tcp" +func convertDockerPort(in string) (uint16, string, error) { + parts := strings.Split(in, "/") + switch len(parts) { + case 1: + // assume it's a TCP port + p, err := strconv.Atoi(parts[0]) + if err != nil { + return 0, "", xerrors.Errorf("invalid port format: %s", in) + } + return uint16(p), "tcp", nil + case 2: + p, err := strconv.Atoi(parts[0]) + if err != nil { + return 0, "", xerrors.Errorf("invalid port format: %s", in) + } + return uint16(p), parts[1], nil + default: + return 0, "", xerrors.Errorf("invalid port format: %s", in) + } +} diff --git a/agent/agentcontainers/containers_internal_test.go b/agent/agentcontainers/containers_internal_test.go new file mode 100644 index 0000000000000..b9f34261ddcad --- /dev/null +++ b/agent/agentcontainers/containers_internal_test.go @@ -0,0 +1,348 @@ +package agentcontainers + +import ( + "fmt" + "os/exec" + "runtime" + "strconv" + "strings" + "testing" + "time" + + "github.com/google/uuid" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" + + "github.com/coder/coder/v2/agent/agentcontainers/acmock" + "github.com/coder/coder/v2/agent/agentexec" + "github.com/coder/coder/v2/codersdk" + "github.com/coder/coder/v2/testutil" + "github.com/coder/quartz" +) + +// TestDockerCLIContainerLister tests the happy path of the +// dockerCLIContainerLister.List method. It starts a container with a known +// label, lists the containers, and verifies that the expected container is +// returned. The container is deleted after the test is complete. +func TestDockerCLIContainerLister(t *testing.T) { + t.Parallel() + if runtime.GOOS != "linux" { + t.Skip("creating containers on non-linux runners is slow and flaky") + } + + // Conditionally skip if Docker is not available. + if _, err := exec.LookPath("docker"); err != nil { + t.Skip("docker not found in PATH") + } + + pool, err := dockertest.NewPool("") + require.NoError(t, err, "Could not connect to docker") + testLabelValue := uuid.New().String() + // Create a temporary directory to validate that we surface mounts correctly. + testTempDir := t.TempDir() + // Pick a random port to expose for testing port bindings. 
+ testRandPort := testutil.RandomPortNoListen(t) + ct, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "busybox", + Tag: "latest", + Cmd: []string{"sleep", "infinity"}, + Labels: map[string]string{"com.coder.test": testLabelValue}, + Mounts: []string{testTempDir + ":" + testTempDir}, + ExposedPorts: []string{fmt.Sprintf("%d/tcp", testRandPort)}, + PortBindings: map[docker.Port][]docker.PortBinding{ + docker.Port(fmt.Sprintf("%d/tcp", testRandPort)): { + { + HostIP: "0.0.0.0", + HostPort: strconv.FormatInt(int64(testRandPort), 10), + }, + }, + }, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + require.NoError(t, err, "Could not start test docker container") + t.Logf("Created container %q", ct.Container.Name) + t.Cleanup(func() { + assert.NoError(t, pool.Purge(ct), "Could not purge resource %q", ct.Container.Name) + t.Logf("Purged container %q", ct.Container.Name) + }) + + dcl := NewDocker(agentexec.DefaultExecer) + ctx := testutil.Context(t, testutil.WaitShort) + actual, err := dcl.List(ctx) + require.NoError(t, err, "Could not list containers") + require.Empty(t, actual.Warnings, "Expected no warnings") + var found bool + for _, foundContainer := range actual.Containers { + if foundContainer.ID == ct.Container.ID { + found = true + assert.Equal(t, ct.Container.Created, foundContainer.CreatedAt) + // ory/dockertest pre-pends a forward slash to the container name. + assert.Equal(t, strings.TrimPrefix(ct.Container.Name, "/"), foundContainer.FriendlyName) + // ory/dockertest returns the sha256 digest of the image. + assert.Equal(t, "busybox:latest", foundContainer.Image) + assert.Equal(t, ct.Container.Config.Labels, foundContainer.Labels) + assert.True(t, foundContainer.Running) + assert.Equal(t, "running", foundContainer.Status) + if assert.Len(t, foundContainer.Ports, 1) { + assert.Equal(t, testRandPort, foundContainer.Ports[0].Port) + assert.Equal(t, "tcp", foundContainer.Ports[0].Network) + } + if assert.Len(t, foundContainer.Volumes, 1) { + assert.Equal(t, testTempDir, foundContainer.Volumes[testTempDir]) + } + break + } + } + assert.True(t, found, "Expected to find container with label 'com.coder.test=%s'", testLabelValue) +} + +// TestContainersHandler tests the devcontainersHandler.getContainers method using +// a mock implementation. It specifically tests caching behavior.
+func TestContainersHandler(t *testing.T) { + t.Parallel() + + t.Run("list", func(t *testing.T) { + t.Parallel() + + fakeCt := fakeContainer(t) + fakeCt2 := fakeContainer(t) + makeResponse := func(cts ...codersdk.WorkspaceAgentDevcontainer) codersdk.WorkspaceAgentListContainersResponse { + return codersdk.WorkspaceAgentListContainersResponse{Containers: cts} + } + + // Each test case is called multiple times to ensure idempotency + for _, tc := range []struct { + name string + // data to be stored in the handler + cacheData codersdk.WorkspaceAgentListContainersResponse + // duration of cache + cacheDur time.Duration + // relative age of the cached data + cacheAge time.Duration + // function to set up expectations for the mock + setupMock func(*acmock.MockLister) + // expected result + expected codersdk.WorkspaceAgentListContainersResponse + // expected error + expectedErr string + }{ + { + name: "no cache", + setupMock: func(mcl *acmock.MockLister) { + mcl.EXPECT().List(gomock.Any()).Return(makeResponse(fakeCt), nil).AnyTimes() + }, + expected: makeResponse(fakeCt), + }, + { + name: "no data", + cacheData: makeResponse(), + cacheAge: 2 * time.Second, + cacheDur: time.Second, + setupMock: func(mcl *acmock.MockLister) { + mcl.EXPECT().List(gomock.Any()).Return(makeResponse(fakeCt), nil).AnyTimes() + }, + expected: makeResponse(fakeCt), + }, + { + name: "cached data", + cacheAge: time.Second, + cacheData: makeResponse(fakeCt), + cacheDur: 2 * time.Second, + expected: makeResponse(fakeCt), + }, + { + name: "lister error", + setupMock: func(mcl *acmock.MockLister) { + mcl.EXPECT().List(gomock.Any()).Return(makeResponse(), assert.AnError).AnyTimes() + }, + expectedErr: assert.AnError.Error(), + }, + { + name: "stale cache", + cacheAge: 2 * time.Second, + cacheData: makeResponse(fakeCt), + cacheDur: time.Second, + setupMock: func(mcl *acmock.MockLister) { + mcl.EXPECT().List(gomock.Any()).Return(makeResponse(fakeCt2), nil).AnyTimes() + }, + expected: makeResponse(fakeCt2), + }, + } { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + var ( + ctx = testutil.Context(t, testutil.WaitShort) + clk = quartz.NewMock(t) + ctrl = gomock.NewController(t) + mockLister = acmock.NewMockLister(ctrl) + now = time.Now().UTC() + ch = devcontainersHandler{ + cacheDuration: tc.cacheDur, + cl: mockLister, + clock: clk, + containers: &tc.cacheData, + lockCh: make(chan struct{}, 1), + } + ) + if tc.cacheAge != 0 { + ch.mtime = now.Add(-tc.cacheAge) + } + if tc.setupMock != nil { + tc.setupMock(mockLister) + } + + clk.Set(now).MustWait(ctx) + + // Repeat the test to ensure idempotency + for i := 0; i < 2; i++ { + actual, err := ch.getContainers(ctx) + if tc.expectedErr != "" { + require.Empty(t, actual, "expected no data (attempt %d)", i) + require.ErrorContains(t, err, tc.expectedErr, "expected error (attempt %d)", i) + } else { + require.NoError(t, err, "expected no error (attempt %d)", i) + require.Equal(t, tc.expected, actual, "expected containers to be equal (attempt %d)", i) + } + } + }) + } + }) +} + +func TestConvertDockerPort(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + name string + in string + expectPort uint16 + expectNetwork string + expectError string + }{ + { + name: "empty port", + in: "", + expectError: "invalid port", + }, + { + name: "valid tcp port", + in: "8080/tcp", + expectPort: 8080, + expectNetwork: "tcp", + }, + { + name: "valid udp port", + in: "8080/udp", + expectPort: 8080, + expectNetwork: "udp", + }, + { + name: "valid port no network", + in: "8080", + 
expectPort: 8080, + expectNetwork: "tcp", + }, + { + name: "invalid port", + in: "invalid/tcp", + expectError: "invalid port", + }, + { + name: "invalid port no network", + in: "invalid", + expectError: "invalid port", + }, + { + name: "multiple network", + in: "8080/tcp/udp", + expectError: "invalid port", + }, + } { + tc := tc // not needed anymore but makes the linter happy + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + actualPort, actualNetwork, actualErr := convertDockerPort(tc.in) + if tc.expectError != "" { + assert.Zero(t, actualPort, "expected no port") + assert.Empty(t, actualNetwork, "expected no network") + assert.ErrorContains(t, actualErr, tc.expectError) + } else { + assert.NoError(t, actualErr, "expected no error") + assert.Equal(t, tc.expectPort, actualPort, "expected port to match") + assert.Equal(t, tc.expectNetwork, actualNetwork, "expected network to match") + } + }) + } +} + +func TestConvertDockerVolume(t *testing.T) { + t.Parallel() + + for _, tc := range []struct { + name string + in string + expectHostPath string + expectContainerPath string + expectError string + }{ + { + name: "empty volume", + in: "", + expectError: "invalid volume", + }, + { + name: "length 1 volume", + in: "/path/to/something", + expectHostPath: "/path/to/something", + expectContainerPath: "/path/to/something", + }, + { + name: "length 2 volume", + in: "/path/to/something=/path/to/something/else", + expectHostPath: "/path/to/something", + expectContainerPath: "/path/to/something/else", + }, + { + name: "invalid length volume", + in: "/path/to/something=/path/to/something/else=/path/to/something/else/else", + expectError: "invalid volume", + }, + } { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + }) + } +} + +func fakeContainer(t *testing.T, mut ...func(*codersdk.WorkspaceAgentDevcontainer)) codersdk.WorkspaceAgentDevcontainer { + t.Helper() + ct := codersdk.WorkspaceAgentDevcontainer{ + CreatedAt: time.Now().UTC(), + ID: uuid.New().String(), + FriendlyName: testutil.GetRandomName(t), + Image: testutil.GetRandomName(t) + ":" + strings.Split(uuid.New().String(), "-")[0], + Labels: map[string]string{ + testutil.GetRandomName(t): testutil.GetRandomName(t), + }, + Running: true, + Ports: []codersdk.WorkspaceAgentListeningPort{ + { + Network: "tcp", + Port: testutil.RandomPortNoListen(t), + }, + }, + Status: testutil.MustRandString(t, 10), + Volumes: map[string]string{testutil.GetRandomName(t): testutil.GetRandomName(t)}, + } + for _, m := range mut { + m(&ct) + } + return ct +} diff --git a/agent/api.go b/agent/api.go index 2df791d6fbb68..a3241feb3b7ee 100644 --- a/agent/api.go +++ b/agent/api.go @@ -7,6 +7,7 @@ import ( "github.com/go-chi/chi/v5" + "github.com/coder/coder/v2/agent/agentcontainers" "github.com/coder/coder/v2/coderd/httpapi" "github.com/coder/coder/v2/codersdk" ) @@ -35,7 +36,9 @@ func (a *agent) apiHandler() http.Handler { ignorePorts: cpy, cacheDuration: cacheDuration, } + ch := agentcontainers.New(agentcontainers.WithLister(a.lister)) promHandler := PrometheusMetricsHandler(a.prometheusRegistry, a.logger) + r.Get("/api/v0/containers", ch.ServeHTTP) r.Get("/api/v0/listening-ports", lp.handler) r.Get("/api/v0/netcheck", a.HandleNetcheck) r.Get("/debug/logs", a.HandleHTTPDebugLogs) diff --git a/coderd/apidoc/docs.go b/coderd/apidoc/docs.go index 98c694ab4175d..c7d8601b3aaba 100644 --- a/coderd/apidoc/docs.go +++ b/coderd/apidoc/docs.go @@ -7854,6 +7854,49 @@ const docTemplate = `{ } } }, + "/workspaceagents/{workspaceagent}/containers": { + "get": { + 
"security": [ + { + "CoderSessionToken": [] + } + ], + "produces": [ + "application/json" + ], + "tags": [ + "Agents" + ], + "summary": "Get running containers for workspace agent", + "operationId": "get-running-containers-for-workspace-agent", + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Workspace agent ID", + "name": "workspaceagent", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "key=value", + "description": "Labels", + "name": "label", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/codersdk.WorkspaceAgentListContainersResponse" + } + } + } + } + }, "/workspaceagents/{workspaceagent}/coordinate": { "get": { "security": [ @@ -15608,6 +15651,57 @@ const docTemplate = `{ } } }, + "codersdk.WorkspaceAgentDevcontainer": { + "type": "object", + "properties": { + "created_at": { + "description": "CreatedAt is the time the container was created.", + "type": "string", + "format": "date-time" + }, + "id": { + "description": "ID is the unique identifier of the container.", + "type": "string" + }, + "image": { + "description": "Image is the name of the container image.", + "type": "string" + }, + "labels": { + "description": "Labels is a map of key-value pairs of container labels.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "name": { + "description": "FriendlyName is the human-readable name of the container.", + "type": "string" + }, + "ports": { + "description": "Ports includes ports exposed by the container.", + "type": "array", + "items": { + "$ref": "#/definitions/codersdk.WorkspaceAgentListeningPort" + } + }, + "running": { + "description": "Running is true if the container is currently running.", + "type": "boolean" + }, + "status": { + "description": "Status is the current status of the container. This is somewhat\nimplementation-dependent, but should generally be a human-readable\nstring.", + "type": "string" + }, + "volumes": { + "description": "Volumes is a map of \"things\" mounted into the container. Again, this\nis somewhat implementation-dependent.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, "codersdk.WorkspaceAgentHealth": { "type": "object", "properties": { @@ -15648,6 +15742,25 @@ const docTemplate = `{ "WorkspaceAgentLifecycleOff" ] }, + "codersdk.WorkspaceAgentListContainersResponse": { + "type": "object", + "properties": { + "containers": { + "description": "Containers is a list of containers visible to the workspace agent.", + "type": "array", + "items": { + "$ref": "#/definitions/codersdk.WorkspaceAgentDevcontainer" + } + }, + "warnings": { + "description": "Warnings is a list of warnings that may have occurred during the\nprocess of listing containers. 
This should not include fatal errors.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, "codersdk.WorkspaceAgentListeningPort": { "type": "object", "properties": { diff --git a/coderd/apidoc/swagger.json b/coderd/apidoc/swagger.json index afe36a8389899..3a11126423cf4 100644 --- a/coderd/apidoc/swagger.json +++ b/coderd/apidoc/swagger.json @@ -6930,6 +6930,45 @@ } } }, + "/workspaceagents/{workspaceagent}/containers": { + "get": { + "security": [ + { + "CoderSessionToken": [] + } + ], + "produces": ["application/json"], + "tags": ["Agents"], + "summary": "Get running containers for workspace agent", + "operationId": "get-running-containers-for-workspace-agent", + "parameters": [ + { + "type": "string", + "format": "uuid", + "description": "Workspace agent ID", + "name": "workspaceagent", + "in": "path", + "required": true + }, + { + "type": "string", + "format": "key=value", + "description": "Labels", + "name": "label", + "in": "query", + "required": true + } + ], + "responses": { + "200": { + "description": "OK", + "schema": { + "$ref": "#/definitions/codersdk.WorkspaceAgentListContainersResponse" + } + } + } + } + }, "/workspaceagents/{workspaceagent}/coordinate": { "get": { "security": [ @@ -14215,6 +14254,57 @@ } } }, + "codersdk.WorkspaceAgentDevcontainer": { + "type": "object", + "properties": { + "created_at": { + "description": "CreatedAt is the time the container was created.", + "type": "string", + "format": "date-time" + }, + "id": { + "description": "ID is the unique identifier of the container.", + "type": "string" + }, + "image": { + "description": "Image is the name of the container image.", + "type": "string" + }, + "labels": { + "description": "Labels is a map of key-value pairs of container labels.", + "type": "object", + "additionalProperties": { + "type": "string" + } + }, + "name": { + "description": "FriendlyName is the human-readable name of the container.", + "type": "string" + }, + "ports": { + "description": "Ports includes ports exposed by the container.", + "type": "array", + "items": { + "$ref": "#/definitions/codersdk.WorkspaceAgentListeningPort" + } + }, + "running": { + "description": "Running is true if the container is currently running.", + "type": "boolean" + }, + "status": { + "description": "Status is the current status of the container. This is somewhat\nimplementation-dependent, but should generally be a human-readable\nstring.", + "type": "string" + }, + "volumes": { + "description": "Volumes is a map of \"things\" mounted into the container. Again, this\nis somewhat implementation-dependent.", + "type": "object", + "additionalProperties": { + "type": "string" + } + } + } + }, "codersdk.WorkspaceAgentHealth": { "type": "object", "properties": { @@ -14255,6 +14345,25 @@ "WorkspaceAgentLifecycleOff" ] }, + "codersdk.WorkspaceAgentListContainersResponse": { + "type": "object", + "properties": { + "containers": { + "description": "Containers is a list of containers visible to the workspace agent.", + "type": "array", + "items": { + "$ref": "#/definitions/codersdk.WorkspaceAgentDevcontainer" + } + }, + "warnings": { + "description": "Warnings is a list of warnings that may have occurred during the\nprocess of listing containers. 
This should not include fatal errors.", + "type": "array", + "items": { + "type": "string" + } + } + } + }, "codersdk.WorkspaceAgentListeningPort": { "type": "object", "properties": { diff --git a/coderd/coderd.go b/coderd/coderd.go index be558797389b9..4603f78acc0d9 100644 --- a/coderd/coderd.go +++ b/coderd/coderd.go @@ -1211,6 +1211,7 @@ func New(options *Options) *API { r.Get("/logs", api.workspaceAgentLogs) r.Get("/listening-ports", api.workspaceAgentListeningPorts) r.Get("/connection", api.workspaceAgentConnection) + r.Get("/containers", api.workspaceAgentListContainers) r.Get("/coordinate", api.workspaceAgentClientCoordinate) // PTY is part of workspaceAppServer. diff --git a/coderd/util/maps/maps.go b/coderd/util/maps/maps.go new file mode 100644 index 0000000000000..6d3d31717d33b --- /dev/null +++ b/coderd/util/maps/maps.go @@ -0,0 +1,27 @@ +package maps + +import ( + "sort" + + "golang.org/x/exp/constraints" +) + +// Subset returns true if all the keys of a are present +// in b and have the same values. +func Subset[T, U comparable](a, b map[T]U) bool { + for ka, va := range a { + if vb, ok := b[ka]; !ok || va != vb { + return false + } + } + return true +} + +// SortedKeys returns the keys of m in sorted order. +func SortedKeys[T constraints.Ordered](m map[T]any) (keys []T) { + for k := range m { + keys = append(keys, k) + } + sort.Slice(keys, func(i, j int) bool { return keys[i] < keys[j] }) + return keys +} diff --git a/coderd/util/maps/maps_test.go b/coderd/util/maps/maps_test.go new file mode 100644 index 0000000000000..1858d6467e89a --- /dev/null +++ b/coderd/util/maps/maps_test.go @@ -0,0 +1,64 @@ +package maps_test + +import ( + "strconv" + "testing" + + "github.com/coder/coder/v2/coderd/util/maps" +) + +func TestSubset(t *testing.T) { + t.Parallel() + + for idx, tc := range []struct { + a map[string]string + b map[string]string + expected bool + }{ + { + a: nil, + b: nil, + expected: true, + }, + { + a: map[string]string{}, + b: map[string]string{}, + expected: true, + }, + { + a: map[string]string{"a": "1", "b": "2"}, + b: map[string]string{"a": "1", "b": "2"}, + expected: true, + }, + { + a: map[string]string{"a": "1", "b": "2"}, + b: map[string]string{"a": "1"}, + expected: false, + }, + { + a: map[string]string{"a": "1"}, + b: map[string]string{"a": "1", "b": "2"}, + expected: true, + }, + { + a: map[string]string{"a": "1", "b": "2"}, + b: map[string]string{}, + expected: false, + }, + { + a: map[string]string{"a": "1", "b": "2"}, + b: map[string]string{"a": "1", "b": "3"}, + expected: false, + }, + } { + tc := tc + t.Run("#"+strconv.Itoa(idx), func(t *testing.T) { + t.Parallel() + + actual := maps.Subset(tc.a, tc.b) + if actual != tc.expected { + t.Errorf("expected %v, got %v", tc.expected, actual) + } + }) + } +} diff --git a/coderd/workspaceagents.go b/coderd/workspaceagents.go index 026c3581ff14d..8132da9bd7bfa 100644 --- a/coderd/workspaceagents.go +++ b/coderd/workspaceagents.go @@ -34,6 +34,7 @@ import ( "github.com/coder/coder/v2/coderd/jwtutils" "github.com/coder/coder/v2/coderd/rbac" "github.com/coder/coder/v2/coderd/rbac/policy" + maputil "github.com/coder/coder/v2/coderd/util/maps" "github.com/coder/coder/v2/coderd/wspubsub" "github.com/coder/coder/v2/codersdk" "github.com/coder/coder/v2/codersdk/agentsdk" @@ -678,6 +679,99 @@ func (api *API) workspaceAgentListeningPorts(rw http.ResponseWriter, r *http.Req httpapi.Write(ctx, rw, http.StatusOK, portsResponse) } +// @Summary Get running containers for workspace agent +// @ID 
get-running-containers-for-workspace-agent +// @Security CoderSessionToken +// @Produce json +// @Tags Agents +// @Param workspaceagent path string true "Workspace agent ID" format(uuid) +// @Param label query string true "Labels" format(key=value) +// @Success 200 {object} codersdk.WorkspaceAgentListContainersResponse +// @Router /workspaceagents/{workspaceagent}/containers [get] +func (api *API) workspaceAgentListContainers(rw http.ResponseWriter, r *http.Request) { + ctx := r.Context() + workspaceAgent := httpmw.WorkspaceAgentParam(r) + + labelParam, ok := r.URL.Query()["label"] + if !ok { + labelParam = []string{} + } + labels := make(map[string]string, len(labelParam)/2) + for _, label := range labelParam { + kvs := strings.Split(label, "=") + if len(kvs) != 2 { + httpapi.Write(r.Context(), rw, http.StatusBadRequest, codersdk.Response{ + Message: "Invalid label format", + Detail: "Labels must be in the format key=value", + }) + return + } + labels[kvs[0]] = kvs[1] + } + + // If the agent is unreachable, the request will hang. Assume that if we + // don't get a response after 30s that the agent is unreachable. + ctx, cancel := context.WithTimeout(ctx, 30*time.Second) + defer cancel() + apiAgent, err := db2sdk.WorkspaceAgent( + api.DERPMap(), + *api.TailnetCoordinator.Load(), + workspaceAgent, + nil, + nil, + nil, + api.AgentInactiveDisconnectTimeout, + api.DeploymentValues.AgentFallbackTroubleshootingURL.String(), + ) + if err != nil { + httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ + Message: "Internal error reading workspace agent.", + Detail: err.Error(), + }) + return + } + if apiAgent.Status != codersdk.WorkspaceAgentConnected { + httpapi.Write(ctx, rw, http.StatusBadRequest, codersdk.Response{ + Message: fmt.Sprintf("Agent state is %q, it must be in the %q state.", apiAgent.Status, codersdk.WorkspaceAgentConnected), + }) + return + } + + agentConn, release, err := api.agentProvider.AgentConn(ctx, workspaceAgent.ID) + if err != nil { + httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ + Message: "Internal error dialing workspace agent.", + Detail: err.Error(), + }) + return + } + defer release() + + // Get a list of containers that the agent is able to detect + cts, err := agentConn.ListContainers(ctx) + if err != nil { + if errors.Is(err, context.Canceled) { + httpapi.Write(ctx, rw, http.StatusRequestTimeout, codersdk.Response{ + Message: "Failed to fetch containers from agent.", + Detail: "Request timed out.", + }) + return + } + httpapi.Write(ctx, rw, http.StatusInternalServerError, codersdk.Response{ + Message: "Internal error fetching containers.", + Detail: err.Error(), + }) + return + } + + // Filter in-place by labels + cts.Containers = slices.DeleteFunc(cts.Containers, func(ct codersdk.WorkspaceAgentDevcontainer) bool { + return !maputil.Subset(labels, ct.Labels) + }) + + httpapi.Write(ctx, rw, http.StatusOK, cts) +} + // @Summary Get connection info for workspace agent // @ID get-connection-info-for-workspace-agent // @Security CoderSessionToken diff --git a/coderd/workspaceagents_test.go b/coderd/workspaceagents_test.go index c75b3f3ed53fc..f7a3513d4f655 100644 --- a/coderd/workspaceagents_test.go +++ b/coderd/workspaceagents_test.go @@ -7,6 +7,7 @@ import ( "maps" "net" "net/http" + "os" "runtime" "strconv" "strings" @@ -15,9 +16,13 @@ import ( "time" "github.com/go-jose/go-jose/v4/jwt" + "github.com/google/go-cmp/cmp" "github.com/google/uuid" + "github.com/ory/dockertest/v3" + "github.com/ory/dockertest/v3/docker" 
"github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" "golang.org/x/xerrors" "google.golang.org/protobuf/types/known/timestamppb" "tailscale.com/tailcfg" @@ -25,6 +30,9 @@ import ( "cdr.dev/slog" "cdr.dev/slog/sloggers/slogtest" "github.com/coder/coder/v2/agent" + "github.com/coder/coder/v2/agent/agentcontainers" + "github.com/coder/coder/v2/agent/agentcontainers/acmock" + "github.com/coder/coder/v2/agent/agentexec" "github.com/coder/coder/v2/agent/agenttest" agentproto "github.com/coder/coder/v2/agent/proto" "github.com/coder/coder/v2/coderd/coderdtest" @@ -1053,6 +1061,187 @@ func TestWorkspaceAgentListeningPorts(t *testing.T) { }) } +func TestWorkspaceAgentContainers(t *testing.T) { + t.Parallel() + + // This test will not normally run in CI, but is kept here as a semi-manual + // test for local development. Run it as follows: + // CODER_TEST_USE_DOCKER=1 go test -run TestWorkspaceAgentContainers/Docker ./coderd + t.Run("Docker", func(t *testing.T) { + t.Parallel() + if ctud, ok := os.LookupEnv("CODER_TEST_USE_DOCKER"); !ok || ctud != "1" { + t.Skip("Set CODER_TEST_USE_DOCKER=1 to run this test") + } + + pool, err := dockertest.NewPool("") + require.NoError(t, err, "Could not connect to docker") + testLabels := map[string]string{ + "com.coder.test": uuid.New().String(), + } + ct, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "busybox", + Tag: "latest", + Cmd: []string{"sleep", "infinity"}, + Labels: testLabels, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + require.NoError(t, err, "Could not start test docker container") + t.Cleanup(func() { + assert.NoError(t, pool.Purge(ct), "Could not purge resource %q", ct.Container.Name) + }) + + // Start another container which we will expect to ignore. + ct2, err := pool.RunWithOptions(&dockertest.RunOptions{ + Repository: "busybox", + Tag: "latest", + Cmd: []string{"sleep", "infinity"}, + Labels: map[string]string{"com.coder.test": "ignoreme"}, + }, func(config *docker.HostConfig) { + config.AutoRemove = true + config.RestartPolicy = docker.RestartPolicy{Name: "no"} + }) + require.NoError(t, err, "Could not start second test docker container") + t.Cleanup(func() { + assert.NoError(t, pool.Purge(ct2), "Could not purge resource %q", ct2.Container.Name) + }) + + client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{}) + + user := coderdtest.CreateFirstUser(t, client) + r := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{ + OrganizationID: user.OrganizationID, + OwnerID: user.UserID, + }).WithAgent(func(agents []*proto.Agent) []*proto.Agent { + return agents + }).Do() + _ = agenttest.New(t, client.URL, r.AgentToken, func(opts *agent.Options) { + opts.ContainerLister = agentcontainers.NewDocker(agentexec.DefaultExecer) + }) + resources := coderdtest.NewWorkspaceAgentWaiter(t, client, r.Workspace.ID).Wait() + require.Len(t, resources, 1, "expected one resource") + require.Len(t, resources[0].Agents, 1, "expected one agent") + agentID := resources[0].Agents[0].ID + + ctx := testutil.Context(t, testutil.WaitLong) + + // If we filter by testLabels, we should only get one container back. 
+ res, err := client.WorkspaceAgentListContainers(ctx, agentID, testLabels) + require.NoError(t, err, "failed to list containers filtered by test label") + require.Len(t, res.Containers, 1, "expected exactly one container") + assert.Equal(t, ct.Container.ID, res.Containers[0].ID, "expected container ID to match") + assert.Equal(t, "busybox:latest", res.Containers[0].Image, "expected container image to match") + assert.Equal(t, ct.Container.Config.Labels, res.Containers[0].Labels, "expected container labels to match") + assert.Equal(t, strings.TrimPrefix(ct.Container.Name, "/"), res.Containers[0].FriendlyName, "expected container name to match") + assert.True(t, res.Containers[0].Running, "expected container to be running") + assert.Equal(t, "running", res.Containers[0].Status, "expected container status to be running") + + // List all containers and ensure we get at least both (there may be more). + res, err = client.WorkspaceAgentListContainers(ctx, agentID, nil) + require.NoError(t, err, "failed to list all containers") + require.NotEmpty(t, res.Containers, "expected to find containers") + var found []string + for _, c := range res.Containers { + found = append(found, c.ID) + } + require.Contains(t, found, ct.Container.ID, "expected to find first container without label filter") + require.Contains(t, found, ct2.Container.ID, "expected to find first container without label filter") + }) + + // This test will normally run in CI. It uses a mock implementation of + // agentcontainers.Lister instead of introducing a hard dependency on Docker. + t.Run("Mock", func(t *testing.T) { + t.Parallel() + + // begin test fixtures + testLabels := map[string]string{ + "com.coder.test": uuid.New().String(), + } + testResponse := codersdk.WorkspaceAgentListContainersResponse{ + Containers: []codersdk.WorkspaceAgentDevcontainer{ + { + ID: uuid.NewString(), + CreatedAt: dbtime.Now(), + FriendlyName: testutil.GetRandomName(t), + Image: "busybox:latest", + Labels: testLabels, + Running: true, + Status: "running", + Ports: []codersdk.WorkspaceAgentListeningPort{ + { + Network: "tcp", + Port: 80, + }, + }, + Volumes: map[string]string{ + "/host": "/container", + }, + }, + }, + } + // end test fixtures + + for _, tc := range []struct { + name string + setupMock func(*acmock.MockLister) (codersdk.WorkspaceAgentListContainersResponse, error) + }{ + { + name: "test response", + setupMock: func(mcl *acmock.MockLister) (codersdk.WorkspaceAgentListContainersResponse, error) { + mcl.EXPECT().List(gomock.Any()).Return(testResponse, nil).Times(1) + return testResponse, nil + }, + }, + { + name: "error response", + setupMock: func(mcl *acmock.MockLister) (codersdk.WorkspaceAgentListContainersResponse, error) { + mcl.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{}, assert.AnError).Times(1) + return codersdk.WorkspaceAgentListContainersResponse{}, assert.AnError + }, + }, + } { + tc := tc + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + ctrl := gomock.NewController(t) + mcl := acmock.NewMockLister(ctrl) + expected, expectedErr := tc.setupMock(mcl) + client, db := coderdtest.NewWithDatabase(t, &coderdtest.Options{}) + user := coderdtest.CreateFirstUser(t, client) + r := dbfake.WorkspaceBuild(t, db, database.WorkspaceTable{ + OrganizationID: user.OrganizationID, + OwnerID: user.UserID, + }).WithAgent(func(agents []*proto.Agent) []*proto.Agent { + return agents + }).Do() + _ = agenttest.New(t, client.URL, r.AgentToken, func(opts *agent.Options) { + opts.ContainerLister = mcl + }) + 
resources := coderdtest.NewWorkspaceAgentWaiter(t, client, r.Workspace.ID).Wait() + require.Len(t, resources, 1, "expected one resource") + require.Len(t, resources[0].Agents, 1, "expected one agent") + agentID := resources[0].Agents[0].ID + + ctx := testutil.Context(t, testutil.WaitLong) + + // List containers and ensure we get the expected mocked response. + res, err := client.WorkspaceAgentListContainers(ctx, agentID, nil) + if expectedErr != nil { + require.Contains(t, err.Error(), expectedErr.Error(), "unexpected error") + require.Empty(t, res, "expected empty response") + } else { + require.NoError(t, err, "failed to list all containers") + if diff := cmp.Diff(expected, res); diff != "" { + t.Fatalf("unexpected response (-want +got):\n%s", diff) + } + } + }) + } + }) +} + func TestWorkspaceAgentAppHealth(t *testing.T) { t.Parallel() client, db := coderdtest.NewWithDatabase(t, nil) diff --git a/codersdk/workspaceagents.go b/codersdk/workspaceagents.go index 4f04b70aee83c..8e2209fa8072b 100644 --- a/codersdk/workspaceagents.go +++ b/codersdk/workspaceagents.go @@ -392,6 +392,72 @@ func (c *Client) WorkspaceAgentListeningPorts(ctx context.Context, agentID uuid. return listeningPorts, json.NewDecoder(res.Body).Decode(&listeningPorts) } +// WorkspaceAgentDevcontainer describes a devcontainer of some sort +// that is visible to the workspace agent. This struct is an abstraction +// of potentially multiple implementations, and the fields will be +// somewhat implementation-dependent. +type WorkspaceAgentDevcontainer struct { + // CreatedAt is the time the container was created. + CreatedAt time.Time `json:"created_at" format:"date-time"` + // ID is the unique identifier of the container. + ID string `json:"id"` + // FriendlyName is the human-readable name of the container. + FriendlyName string `json:"name"` + // Image is the name of the container image. + Image string `json:"image"` + // Labels is a map of key-value pairs of container labels. + Labels map[string]string `json:"labels"` + // Running is true if the container is currently running. + Running bool `json:"running"` + // Ports includes ports exposed by the container. + Ports []WorkspaceAgentListeningPort `json:"ports"` + // Status is the current status of the container. This is somewhat + // implementation-dependent, but should generally be a human-readable + // string. + Status string `json:"status"` + // Volumes is a map of "things" mounted into the container. Again, this + // is somewhat implementation-dependent. + Volumes map[string]string `json:"volumes"` +} + +// WorkspaceAgentListContainersResponse is the response to the list containers +// request. +type WorkspaceAgentListContainersResponse struct { + // Containers is a list of containers visible to the workspace agent. + Containers []WorkspaceAgentDevcontainer `json:"containers"` + // Warnings is a list of warnings that may have occurred during the + // process of listing containers. This should not include fatal errors. + Warnings []string `json:"warnings,omitempty"` +} + +func workspaceAgentContainersLabelFilter(kvs map[string]string) RequestOption { + return func(r *http.Request) { + q := r.URL.Query() + for k, v := range kvs { + kv := fmt.Sprintf("%s=%s", k, v) + q.Add("label", kv) + } + r.URL.RawQuery = q.Encode() + } +} + +// WorkspaceAgentListContainers returns a list of containers that are currently +// running on a Docker daemon accessible to the workspace agent. 
+func (c *Client) WorkspaceAgentListContainers(ctx context.Context, agentID uuid.UUID, labels map[string]string) (WorkspaceAgentListContainersResponse, error) { + lf := workspaceAgentContainersLabelFilter(labels) + res, err := c.Request(ctx, http.MethodGet, fmt.Sprintf("/api/v2/workspaceagents/%s/containers", agentID), nil, lf) + if err != nil { + return WorkspaceAgentListContainersResponse{}, err + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return WorkspaceAgentListContainersResponse{}, ReadBodyAsError(res) + } + var cr WorkspaceAgentListContainersResponse + + return cr, json.NewDecoder(res.Body).Decode(&cr) +} + //nolint:revive // Follow is a control flag on the server as well. func (c *Client) WorkspaceAgentLogsAfter(ctx context.Context, agentID uuid.UUID, after int64, follow bool) (<-chan []WorkspaceAgentLog, io.Closer, error) { var queryParams []string diff --git a/codersdk/workspacesdk/agentconn.go b/codersdk/workspacesdk/agentconn.go index 4c3a9539bbf55..f803f8736a6fa 100644 --- a/codersdk/workspacesdk/agentconn.go +++ b/codersdk/workspacesdk/agentconn.go @@ -336,6 +336,22 @@ func (c *AgentConn) PrometheusMetrics(ctx context.Context) ([]byte, error) { return bs, nil } +// ListContainers returns a response from the agent's containers endpoint +func (c *AgentConn) ListContainers(ctx context.Context) (codersdk.WorkspaceAgentListContainersResponse, error) { + ctx, span := tracing.StartSpan(ctx) + defer span.End() + res, err := c.apiRequest(ctx, http.MethodGet, "/api/v0/containers", nil) + if err != nil { + return codersdk.WorkspaceAgentListContainersResponse{}, xerrors.Errorf("do request: %w", err) + } + defer res.Body.Close() + if res.StatusCode != http.StatusOK { + return codersdk.WorkspaceAgentListContainersResponse{}, codersdk.ReadBodyAsError(res) + } + var resp codersdk.WorkspaceAgentListContainersResponse + return resp, json.NewDecoder(res.Body).Decode(&resp) +} + // apiRequest makes a request to the workspace agent's HTTP API server. func (c *AgentConn) apiRequest(ctx context.Context, method, path string, body io.Reader) (*http.Response, error) { ctx, span := tracing.StartSpan(ctx) diff --git a/docs/reference/api/agents.md b/docs/reference/api/agents.md index 22ebe7f35530f..38e30c35e18cd 100644 --- a/docs/reference/api/agents.md +++ b/docs/reference/api/agents.md @@ -638,6 +638,71 @@ curl -X GET http://coder-server:8080/api/v2/workspaceagents/{workspaceagent}/con To perform this operation, you must be authenticated. [Learn more](authentication.md). 
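For orientation, and not as part of the generated documentation that follows, here is a minimal sketch of how the `Client.WorkspaceAgentListContainers` method added above might be used from Go. The deployment URL, session token, agent ID, and label key/value are placeholders; per the coderd handler above, only containers whose labels include every requested key=value pair are returned.

```go
package main

import (
	"context"
	"fmt"
	"log"
	"net/url"

	"github.com/google/uuid"

	"github.com/coder/coder/v2/codersdk"
)

func main() {
	// Placeholder deployment URL and session token.
	serverURL, err := url.Parse("https://coder.example.com")
	if err != nil {
		log.Fatal(err)
	}
	client := codersdk.New(serverURL)
	client.SetSessionToken("your-session-token")

	// Placeholder agent ID; in practice this comes from the workspace resources.
	agentID := uuid.MustParse("00000000-0000-0000-0000-000000000000")

	// Only containers whose labels are a superset of this map are returned;
	// pass nil to list every container the agent can see.
	labels := map[string]string{"com.example.purpose": "devcontainer"}

	res, err := client.WorkspaceAgentListContainers(context.Background(), agentID, labels)
	if err != nil {
		log.Fatal(err)
	}
	for _, warning := range res.Warnings {
		fmt.Println("warning:", warning)
	}
	for _, ct := range res.Containers {
		fmt.Printf("%s (%s): running=%v status=%s\n", ct.FriendlyName, ct.Image, ct.Running, ct.Status)
	}
}
```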
+## Get running containers for workspace agent + +### Code samples + +```shell +# Example request using curl +curl -X GET http://coder-server:8080/api/v2/workspaceagents/{workspaceagent}/containers?label=string \ + -H 'Accept: application/json' \ + -H 'Coder-Session-Token: API_KEY' +``` + +`GET /workspaceagents/{workspaceagent}/containers` + +### Parameters + +| Name | In | Type | Required | Description | +|------------------|-------|-------------------|----------|--------------------| +| `workspaceagent` | path | string(uuid) | true | Workspace agent ID | +| `label` | query | string(key=value) | true | Labels | + +### Example responses + +> 200 Response + +```json +{ + "containers": [ + { + "created_at": "2019-08-24T14:15:22Z", + "id": "string", + "image": "string", + "labels": { + "property1": "string", + "property2": "string" + }, + "name": "string", + "ports": [ + { + "network": "string", + "port": 0, + "process_name": "string" + } + ], + "running": true, + "status": "string", + "volumes": { + "property1": "string", + "property2": "string" + } + } + ], + "warnings": [ + "string" + ] +} +``` + +### Responses + +| Status | Meaning | Description | Schema | +|--------|---------------------------------------------------------|-------------|----------------------------------------------------------------------------------------------------------| +| 200 | [OK](https://tools.ietf.org/html/rfc7231#section-6.3.1) | OK | [codersdk.WorkspaceAgentListContainersResponse](schemas.md#codersdkworkspaceagentlistcontainersresponse) | + +To perform this operation, you must be authenticated. [Learn more](authentication.md). + ## Coordinate workspace agent ### Code samples diff --git a/docs/reference/api/schemas.md b/docs/reference/api/schemas.md index 082b3f3a1f19f..223cf302dc75f 100644 --- a/docs/reference/api/schemas.md +++ b/docs/reference/api/schemas.md @@ -7561,6 +7561,50 @@ If the schedule is empty, the user will be updated to use the default schedule.| | `updated_at` | string | false | | | | `version` | string | false | | | +## codersdk.WorkspaceAgentDevcontainer + +```json +{ + "created_at": "2019-08-24T14:15:22Z", + "id": "string", + "image": "string", + "labels": { + "property1": "string", + "property2": "string" + }, + "name": "string", + "ports": [ + { + "network": "string", + "port": 0, + "process_name": "string" + } + ], + "running": true, + "status": "string", + "volumes": { + "property1": "string", + "property2": "string" + } +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | +|--------------------|---------------------------------------------------------------------------------------|----------|--------------|--------------------------------------------------------------------------------------------------------------------------------------------| +| `created_at` | string | false | | Created at is the time the container was created. | +| `id` | string | false | | ID is the unique identifier of the container. | +| `image` | string | false | | Image is the name of the container image. | +| `labels` | object | false | | Labels is a map of key-value pairs of container labels. | +| » `[any property]` | string | false | | | +| `name` | string | false | | Name is the human-readable name of the container. | +| `ports` | array of [codersdk.WorkspaceAgentListeningPort](#codersdkworkspaceagentlisteningport) | false | | Ports includes ports exposed by the container. | +| `running` | boolean | false | | Running is true if the container is currently running. 
| + | `status` | string | false | | Status is the current status of the container. This is somewhat implementation-dependent, but should generally be a human-readable string. | + | `volumes` | object | false | | Volumes is a map of "things" mounted into the container. Again, this is somewhat implementation-dependent. | + | » `[any property]` | string | false | | | + ## codersdk.WorkspaceAgentHealth ```json @@ -7599,6 +7643,48 @@ If the schedule is empty, the user will be updated to use the default schedule.| | `shutdown_error` | | `off` | +## codersdk.WorkspaceAgentListContainersResponse + +```json +{ + "containers": [ + { + "created_at": "2019-08-24T14:15:22Z", + "id": "string", + "image": "string", + "labels": { + "property1": "string", + "property2": "string" + }, + "name": "string", + "ports": [ + { + "network": "string", + "port": 0, + "process_name": "string" + } + ], + "running": true, + "status": "string", + "volumes": { + "property1": "string", + "property2": "string" + } + } + ], + "warnings": [ + "string" + ] +} +``` + +### Properties + +| Name | Type | Required | Restrictions | Description | |--------------|-------------------------------------------------------------------------------------|----------|--------------|---------------------------------------------------------------------------------------------------------------------------------------| | `containers` | array of [codersdk.WorkspaceAgentDevcontainer](#codersdkworkspaceagentdevcontainer) | false | | Containers is a list of containers visible to the workspace agent. | | `warnings` | array of string | false | | Warnings is a list of warnings that may have occurred during the process of listing containers. This should not include fatal errors. | + ## codersdk.WorkspaceAgentListeningPort ```json diff --git a/site/src/api/typesGenerated.ts b/site/src/api/typesGenerated.ts index 2e7732c525c42..6a776da17c490 100644 --- a/site/src/api/typesGenerated.ts +++ b/site/src/api/typesGenerated.ts @@ -2926,6 +2926,19 @@ export interface WorkspaceAgent { readonly startup_script_behavior: WorkspaceAgentStartupScriptBehavior; } +// From codersdk/workspaceagents.go +export interface WorkspaceAgentDevcontainer { + readonly created_at: string; + readonly id: string; + readonly name: string; + readonly image: string; + readonly labels: Record<string, string>; + readonly running: boolean; + readonly ports: readonly WorkspaceAgentListeningPort[]; + readonly status: string; + readonly volumes: Record<string, string>; +} + // From codersdk/workspaceagents.go export interface WorkspaceAgentHealth { readonly healthy: boolean; @@ -2956,6 +2969,12 @@ export const WorkspaceAgentLifecycles: WorkspaceAgentLifecycle[] = [ "starting", ]; + +// From codersdk/workspaceagents.go +export interface WorkspaceAgentListContainersResponse { + readonly containers: readonly WorkspaceAgentDevcontainer[]; + readonly warnings?: readonly string[]; +} + // From codersdk/workspaceagents.go export interface WorkspaceAgentListeningPort { readonly process_name: string;
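As a closing sketch (not part of the change set above), the agent-side handler and the generated `acmock` package can be combined to exercise the `/api/v0/containers` endpoint in isolation with `httptest`; the test name and literal values here are hypothetical.

```go
package agentcontainers_test

import (
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/stretchr/testify/require"
	"go.uber.org/mock/gomock"

	"github.com/coder/coder/v2/agent/agentcontainers"
	"github.com/coder/coder/v2/agent/agentcontainers/acmock"
	"github.com/coder/coder/v2/codersdk"
)

func TestContainersHandlerSketch(t *testing.T) {
	t.Parallel()

	// Mock lister returning a single fake container.
	ctrl := gomock.NewController(t)
	lister := acmock.NewMockLister(ctrl)
	lister.EXPECT().List(gomock.Any()).Return(codersdk.WorkspaceAgentListContainersResponse{
		Containers: []codersdk.WorkspaceAgentDevcontainer{
			{ID: "fake-container-id", FriendlyName: "test", Running: true, Status: "running"},
		},
	}, nil)

	// agentcontainers.New returns the http.Handler that the agent mounts at
	// /api/v0/containers (see agent/api.go above).
	srv := httptest.NewServer(agentcontainers.New(agentcontainers.WithLister(lister)))
	defer srv.Close()

	res, err := http.Get(srv.URL)
	require.NoError(t, err)
	defer res.Body.Close()
	require.Equal(t, http.StatusOK, res.StatusCode)

	var got codersdk.WorkspaceAgentListContainersResponse
	require.NoError(t, json.NewDecoder(res.Body).Decode(&got))
	require.Len(t, got.Containers, 1)
	require.Equal(t, "fake-container-id", got.Containers[0].ID)
	require.True(t, got.Containers[0].Running)
}
```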