57 changes: 56 additions & 1 deletion coderd/dynamicparameters/render.go
@@ -30,11 +30,34 @@
// Forgetting to do so will result in a memory leak.
type Renderer interface {
Render(ctx context.Context, ownerID uuid.UUID, values map[string]string) (*preview.Output, hcl.Diagnostics)
RenderWithoutCache(ctx context.Context, ownerID uuid.UUID, values map[string]string) (*preview.Output, hcl.Diagnostics)
Close()
}

var ErrTemplateVersionNotReady = xerrors.New("template version job not finished")

// RenderCache is an interface for caching preview.Preview results.
type RenderCache interface {
get(templateVersionID, ownerID uuid.UUID, parameters map[string]string) (*preview.Output, bool)
put(templateVersionID, ownerID uuid.UUID, parameters map[string]string, output *preview.Output)
Close()
}

// noopRenderCache is a no-op implementation of RenderCache that doesn't cache anything.
type noopRenderCache struct{}

func (noopRenderCache) get(uuid.UUID, uuid.UUID, map[string]string) (*preview.Output, bool) {
return nil, false
}

func (noopRenderCache) put(uuid.UUID, uuid.UUID, map[string]string, *preview.Output) {
// no-op
}

func (noopRenderCache) Close() {
// no-op
}

// loader is used to load the necessary coder objects for rendering a template
// version's parameters. The output is a Renderer, which is the object that uses
// the cached objects to render the template version's parameters.
@@ -46,6 +69,9 @@
job *database.ProvisionerJob
terraformValues *database.TemplateVersionTerraformValue
templateVariableValues *[]database.TemplateVersionVariable

// renderCache caches preview.Preview results
renderCache RenderCache
}

// Prepare is the entrypoint for this package. It loads the necessary objects &
@@ -54,6 +80,7 @@
func Prepare(ctx context.Context, db database.Store, cache files.FileAcquirer, versionID uuid.UUID, options ...func(r *loader)) (Renderer, error) {
l := &loader{
templateVersionID: versionID,
renderCache: noopRenderCache{},
}

for _, opt := range options {
@@ -91,6 +118,12 @@
}
}

func WithRenderCache(cache RenderCache) func(r *loader) {
return func(r *loader) {
r.renderCache = cache
}
}

func (r *loader) loadData(ctx context.Context, db database.Store) error {
if r.templateVersion == nil {
tv, err := db.GetTemplateVersionByID(ctx, r.templateVersionID)
@@ -227,6 +260,21 @@
}

func (r *dynamicRenderer) Render(ctx context.Context, ownerID uuid.UUID, values map[string]string) (*preview.Output, hcl.Diagnostics) {
return r.render(ctx, ownerID, values, true)
}

func (r *dynamicRenderer) RenderWithoutCache(ctx context.Context, ownerID uuid.UUID, values map[string]string) (*preview.Output, hcl.Diagnostics) {
return r.render(ctx, ownerID, values, false)
}

func (r *dynamicRenderer) render(ctx context.Context, ownerID uuid.UUID, values map[string]string, useCache bool) (*preview.Output, hcl.Diagnostics) {

Check failure on line 270 in coderd/dynamicparameters/render.go (GitHub Actions / lint): confusing-naming: Method 'render' differs only by capitalization to method 'Render' in the same source file (revive)
// Check cache first if enabled
if useCache {
if cached, ok := r.data.renderCache.get(r.data.templateVersionID, ownerID, values); ok {
return cached, nil
}
}

// Always start with the cached error, if we have one.
ownerErr := r.ownerErrors[ownerID]
if ownerErr == nil {
Expand Down Expand Up @@ -258,7 +306,14 @@
Logger: slog.New(slog.DiscardHandler),
}

return preview.Preview(ctx, input, r.templateFS)
output, diags := preview.Preview(ctx, input, r.templateFS)

// Store in cache if successful and caching is enabled
if useCache && !diags.HasErrors() {
r.data.renderCache.put(r.data.templateVersionID, ownerID, values, output)
}

return output, diags
}

func (r *dynamicRenderer) getWorkspaceOwnerData(ctx context.Context, ownerID uuid.UUID) error {
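To make the new surface area concrete, here is a minimal usage sketch of how a caller might wire a shared cache into Prepare. It is an illustration only: the previewParameters function, its arguments, and the import paths are assumptions (the paths assume the usual coder/coder/v2 layout); only Prepare, WithRenderCache, Render, RenderWithoutCache, and Close come from this diff, and the cache itself would be created once with NewRenderCache() and Closed on shutdown.

package exampleusage // hypothetical caller package

import (
	"context"

	"github.com/google/uuid"
	"golang.org/x/xerrors"

	"github.com/coder/coder/v2/coderd/database"
	"github.com/coder/coder/v2/coderd/dynamicparameters"
	"github.com/coder/coder/v2/coderd/files"
	"github.com/coder/preview"
)

// previewParameters wires a shared RenderCache into Prepare via WithRenderCache
// and renders one set of parameter values for the given owner.
func previewParameters(
	ctx context.Context,
	db database.Store,
	fileCache files.FileAcquirer,
	cache dynamicparameters.RenderCache,
	versionID, ownerID uuid.UUID,
	values map[string]string,
) (*preview.Output, error) {
	renderer, err := dynamicparameters.Prepare(ctx, db, fileCache, versionID,
		dynamicparameters.WithRenderCache(cache))
	if err != nil {
		return nil, err
	}
	// Always Close the Renderer; forgetting to do so leaks memory.
	defer renderer.Close()

	// Render consults the shared cache; RenderWithoutCache bypasses it when a
	// caller needs a fresh preview.Preview run.
	output, diags := renderer.Render(ctx, ownerID, values)
	if diags.HasErrors() {
		return nil, xerrors.New(diags.Error())
	}
	return output, nil
}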
214 changes: 214 additions & 0 deletions coderd/dynamicparameters/rendercache.go
@@ -0,0 +1,214 @@
package dynamicparameters

import (
"context"
"fmt"
"sort"
"sync"
"time"

"github.com/cespare/xxhash/v2"
"github.com/google/uuid"
"github.com/prometheus/client_golang/prometheus"

"github.com/coder/preview"
"github.com/coder/quartz"
)

// RenderCacheImpl is a simple in-memory cache for preview.Preview results.
// It caches based on (templateVersionID, ownerID, parameterValues).
type RenderCacheImpl struct {
mu sync.RWMutex
entries map[cacheKey]*cacheEntry

// Metrics (optional)
cacheHits prometheus.Counter
cacheMisses prometheus.Counter
cacheSize prometheus.Gauge

// TTL cleanup
clock quartz.Clock
ttl time.Duration
stopOnce sync.Once
stopCh chan struct{}
doneCh chan struct{}
}

type cacheEntry struct {
output *preview.Output
timestamp time.Time
}

type cacheKey struct {
templateVersionID uuid.UUID
ownerID uuid.UUID
parameterHash uint64
}

// NewRenderCache creates a new render cache with a default TTL of 1 hour.
func NewRenderCache() *RenderCacheImpl {
return newCache(quartz.NewReal(), time.Hour, nil, nil, nil)
}

// NewRenderCacheWithMetrics creates a new render cache with Prometheus metrics.
func NewRenderCacheWithMetrics(cacheHits, cacheMisses prometheus.Counter, cacheSize prometheus.Gauge) *RenderCacheImpl {
return newCache(quartz.NewReal(), time.Hour, cacheHits, cacheMisses, cacheSize)
}

func newCache(clock quartz.Clock, ttl time.Duration, cacheHits, cacheMisses prometheus.Counter, cacheSize prometheus.Gauge) *RenderCacheImpl {
c := &RenderCacheImpl{
entries: make(map[cacheKey]*cacheEntry),
clock: clock,
cacheHits: cacheHits,
cacheMisses: cacheMisses,
cacheSize: cacheSize,
ttl: ttl,
stopCh: make(chan struct{}),
doneCh: make(chan struct{}),
}

Comment on lines +58 to +69 (Member):

The pattern we use in other places is to keep the metrics on the RenderCacheImpl, then call cache.Register(prometheusregistry) from outside. Or just pass the prometheus registry in to New:

NewRenderCache(registry *prometheus.Registry)

If the registry is nil, like in tests, just don't attach the metrics.

// Start cleanup goroutine
go c.cleanupLoop(context.Background())

return c
}

// NewRenderCacheForTest creates a new render cache for testing purposes.
func NewRenderCacheForTest() *RenderCacheImpl {
return NewRenderCache()
}

// Close stops the cleanup goroutine and waits for it to finish.
func (c *RenderCacheImpl) Close() {
c.stopOnce.Do(func() {
close(c.stopCh)
<-c.doneCh
})
}

func (c *RenderCacheImpl) get(templateVersionID, ownerID uuid.UUID, parameters map[string]string) (*preview.Output, bool) {
key := makeKey(templateVersionID, ownerID, parameters)
c.mu.RLock()
entry, ok := c.entries[key]
c.mu.RUnlock()

if !ok {
// Record miss
if c.cacheMisses != nil {
c.cacheMisses.Inc()
}
return nil, false
}

// Check if entry has expired
if c.clock.Since(entry.timestamp) > c.ttl {
// Expired entry, treat as miss
if c.cacheMisses != nil {
c.cacheMisses.Inc()
}
return nil, false
}

// Record hit and refresh timestamp
if c.cacheHits != nil {
c.cacheHits.Inc()
}

// Refresh timestamp on hit to keep frequently accessed entries alive
c.mu.Lock()
entry.timestamp = c.clock.Now()
c.mu.Unlock()

return entry.output, true
}

func (c *RenderCacheImpl) put(templateVersionID, ownerID uuid.UUID, parameters map[string]string, output *preview.Output) {
key := makeKey(templateVersionID, ownerID, parameters)
c.mu.Lock()
defer c.mu.Unlock()

c.entries[key] = &cacheEntry{
output: output,
timestamp: c.clock.Now(),
}

// Update cache size metric
if c.cacheSize != nil {
c.cacheSize.Set(float64(len(c.entries)))
}
}

func makeKey(templateVersionID, ownerID uuid.UUID, parameters map[string]string) cacheKey {
return cacheKey{
templateVersionID: templateVersionID,
ownerID: ownerID,
parameterHash: hashParameters(parameters),
}
}

// hashParameters creates a deterministic hash of the parameter map.
func hashParameters(params map[string]string) uint64 {
if len(params) == 0 {
return 0
}

// Sort keys for deterministic hashing
keys := make([]string, 0, len(params))
for k := range params {
keys = append(keys, k)
}
sort.Strings(keys)

// Hash the sorted key-value pairs
var b string
for _, k := range keys {
b += fmt.Sprintf("%s:%s,", k, params[k])
}

return xxhash.Sum64String(b)
}

// cleanupLoop runs periodically to remove expired cache entries.
func (c *RenderCacheImpl) cleanupLoop(ctx context.Context) {
defer close(c.doneCh)

// Run cleanup every 15 minutes
cleanupFunc := func() error {
c.cleanup()
return nil
}

// Run once immediately
_ = cleanupFunc()

// Create a cancellable context for the ticker
tickerCtx, cancel := context.WithCancel(ctx)
defer cancel()

// Create ticker for periodic cleanup
tkr := c.clock.TickerFunc(tickerCtx, 15*time.Minute, cleanupFunc, "render-cache-cleanup")

// Wait for stop signal
<-c.stopCh
cancel()

_ = tkr.Wait()
}

// cleanup removes expired entries from the cache.
func (c *RenderCacheImpl) cleanup() {
c.mu.Lock()
defer c.mu.Unlock()

now := c.clock.Now()
for key, entry := range c.entries {
if now.Sub(entry.timestamp) > c.ttl {
delete(c.entries, key)
}
}

// Update cache size metric after cleanup
if c.cacheSize != nil {
c.cacheSize.Set(float64(len(c.entries)))
}
}
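Following the review comment on lines +58 to +69, here is a rough sketch of what a registry-based constructor could look like inside this package. It is not part of the PR: the constructor name and the metric names are assumptions, and it simply builds the three metrics up front so the nil-registry (test) path skips metrics entirely and everything is in place before newCache starts the cleanup goroutine.

// NewRenderCacheWithRegistry is a hypothetical alternative constructor along the
// lines suggested in review: pass a *prometheus.Registry and skip metrics when it
// is nil (e.g. in tests). Metric names here are illustrative only.
func NewRenderCacheWithRegistry(registry *prometheus.Registry) *RenderCacheImpl {
	if registry == nil {
		return newCache(quartz.NewReal(), time.Hour, nil, nil, nil)
	}

	hits := prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "coderd",
		Subsystem: "dynamicparameters",
		Name:      "render_cache_hits_total",
	})
	misses := prometheus.NewCounter(prometheus.CounterOpts{
		Namespace: "coderd",
		Subsystem: "dynamicparameters",
		Name:      "render_cache_misses_total",
	})
	size := prometheus.NewGauge(prometheus.GaugeOpts{
		Namespace: "coderd",
		Subsystem: "dynamicparameters",
		Name:      "render_cache_entries",
	})
	registry.MustRegister(hits, misses, size)

	return newCache(quartz.NewReal(), time.Hour, hits, misses, size)
}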