-
Notifications
You must be signed in to change notification settings - Fork 25
Expand file tree
/
Copy pathvalues.yaml
More file actions
587 lines (541 loc) · 20.2 KB
/
values.yaml
File metadata and controls
587 lines (541 loc) · 20.2 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
---
# chart instance name overrides
nameOverride: ""
fullnameOverride: ""
# labels configuration
# Allows overriding the labels and selectors used by resources
# If not set, defaults to new standard Kubernetes labels (app.kubernetes.io/*)
# For backwards compatibility with existing deployments, set to legacy format:
# labels: { app: pgdog }
# selectorLabels: { app: pgdog }
# NOTE: Changing selectors on existing deployments requires deleting and
# recreating the Deployment due to Kubernetes selector immutability
labels: {}
selectorLabels: {}
# allows adding custom annotations to the deployment
annotations: {}
# allows adding custom annotations to the pods
podAnnotations: {}
# restartOnConfigChange triggers a rolling restart when pgdog config changes.
# Disabled by default; enable if you don't have a hot-reload mechanism (e.g. SIGHUP sidecar).
# See: https://github.com/pgdogdev/helm/issues/15
# restartOnConfigChange: false
# clusterName identifies the Kubernetes cluster (optional)
# When set, this will be added as a label to all Prometheus metrics
# clusterName: ""
# image contains the Docker image properties.
image:
  # repository is the Docker image repository
  repository: ghcr.io/pgdogdev/pgdog
  # tag is the Docker image tag (defaults to Chart appVersion if not specified)
  tag: ""
  # digest is the image digest (overrides tag when specified)
  # Example: digest: sha256:abc123def456...
  digest: ""
  # pullPolicy specifies when to use cached version of the image.
  # Valid values: Always, IfNotPresent, Never
  pullPolicy: IfNotPresent
  # name is the full image name (DEPRECATED: use repository and tag)
  # If set, this takes precedence over repository and tag for backward
  # compatibility
  # name: ghcr.io/pgdogdev/pgdog:main
# port on which PgDog will run.
port: 6432
# healthcheckPort on which PgDog will expose healthcheck endpoint
# (if not specified, no separate healthcheck port is configured)
# healthcheckPort: 8080
# replicas indicates how many instances of PgDog will run (HA).
replicas: 2
# Uncomment to enable explicit grace period
# terminationGracePeriodSeconds: 90
# Optional: Delay stopping the container to allow the Kubernetes service to update its endpoints and
# allow clients to switch to other instances before PgDog shuts down.
# This is useful in combination with short client connection lifetimes to avoid errors during pod shutdown.
# preStopSleepSeconds: 25
# Optional: Deployment rollout strategy (Deployment only)
# strategy: {}
# Optional: container health probes (disabled by default)
# livenessProbe: {}
# readinessProbe: {}
# startupProbe: {}
# statefulSet controls whether to deploy as StatefulSet instead of Deployment
# StatefulSet provides stable DNS hostnames for each pod (e.g., pgdog-0, pgdog-1)
statefulSet:
  # enabled switches from Deployment to StatefulSet
  enabled: false
# env allows setting custom environment variables on the pgdog container
# Note: NODE_ID is automatically generated and cannot be overridden
# Example:
# env:
#   - name: MY_VAR
#     value: "my-value"
#   - name: SECRET_VAR
#     valueFrom:
#       secretKeyRef:
#         name: my-secret
#         key: password
env: []
# Extra customization for advanced use cases
# extraInitContainers allows adding custom init containers that run before the main pgdog container
# Init containers are useful for setup tasks like downloading certificates, waiting for dependencies, etc.
# Example:
# extraInitContainers:
#   - name: init-ca
#     image: busybox
#     command: ['sh', '-c', 'wget -O /pki/ca-bundle.pem https://truststore.example.com/ca-bundle.pem']
#     volumeMounts:
#       - name: pki
#         mountPath: /pki
extraInitContainers: []
# extraVolumes allows adding custom volumes to the pod
# These volumes can be referenced by extraVolumeMounts or extraInitContainers
# Example:
# extraVolumes:
#   - name: pki
#     emptyDir: {}
extraVolumes: []
# extraVolumeMounts allows adding custom volume mounts to the main pgdog container
# The volumes must be defined in extraVolumes
# Example:
# extraVolumeMounts:
#   - name: pki
#     mountPath: /pki
extraVolumeMounts: []
# resources define resource requests and limits for the pgdog container
# Note: requests and limits are set to the same values for Guaranteed QoS
# Ratio: 1GB memory per 1 CPU (1000m CPU = 1Gi memory)
resources:
  requests:
    cpu: 1000m
    memory: 1Gi
  limits:
    cpu: 1000m
    memory: 1Gi
# prometheusResources define resource requests and limits for the
# prometheus sidecar (if enabled)
# Note: requests and limits are set to the same values for Guaranteed QoS
# Ratio: 1GB memory per 1 CPU (e.g., 100m CPU = 100Mi memory)
prometheusResources:
  requests:
    cpu: 100m
    memory: 100Mi
  limits:
    cpu: 100m
    memory: 100Mi
# openMetricsPort configures the port on which OpenMetrics
# (Prometheus) are exported
openMetricsPort: 9090
# prometheusPort configures the port on which Prometheus will export its metrics
# prometheusPort: 9091
# openMetricsNamespace configures the metrics namespace
# (prefix applied to exported metric names)
openMetricsNamespace: pgdog_
# Admin password
# adminPassword: change-me
# databases contains the list of database entries in pgdog.toml
# Supports all arguments from pgdog.toml. Arguments are named in
# camelCase format.
databases: []
# users contains the list of user entries in users.toml
# Supports all arguments from users.toml. Arguments are named in
# camelCase format.
users: []
# mirrors contains a list of databases to replicate traffic from/to.
mirrors: []
# shardedSchemas contains the list of sharded schema entries in pgdog.toml
# Each entry requires: database and shard; name is optional
# Example:
# shardedSchemas:
#   - database: "prod"
#     name: "customer_a"
#     shard: 0
shardedSchemas: []
# shardedTables contains the list of sharded table entries in pgdog.toml
# Each entry requires: database, column and dataType; name is optional
# Example:
# shardedTables:
#   - database: "prod"
#     name: "users"
#     column: "id"
#     dataType: "bigint"
shardedTables: []
# shardedMappings contains the list of sharded mapping entries in pgdog.toml
# Each entry requires: database, column, kind, shard; values, start and end are optional
# Example:
# shardedMappings:
#   - database: "prod"
#     column: "tenant_id"
#     kind: "list"
#     values: [1, 5, 1000]
#     shard: 0
#   - database: "prod"
#     column: "tenant_id"
#     kind: "range"
#     start: 1
#     end: 100
#     shard: 1
shardedMappings: []
# omnishardedTables contains the list of omnisharded table entries in pgdog.toml
# Each entry requires: database and tables; sticky is optional
# Example:
# omnishardedTables:
#   - database: "prod"
#     sticky: true
#     tables:
#       - "pg_class"
#       - "pg_attribute"
#       - "pg_attrdef"
#       - "pg_index"
omnishardedTables: []
# plugins contains the list of plugin entries in pgdog.toml
# Each entry requires: name; config is optional (inline TOML content)
# When config is provided, a <name>.toml file is added to the ConfigMap
# and the plugin config path is set to /etc/pgdog/<name>.toml
# Example:
# plugins:
#   - name: "pgdog_routing"
#     config: |
#       [routing]
#       key = "value"
#   - name: "pgdog_auth"
plugins: []
# service contains the Kubernetes service configuration
service:
  # type specifies the type of Kubernetes service
  # (ClusterIP, NodePort, LoadBalancer, etc)
  type: ClusterIP
  # annotations allows adding custom annotations to the service
  annotations: {}
  # traffic distribution mode for Kubernetes service
  # (PreferClose, PreferSameZone, PreferSameNode)
  # see https://kubernetes.io/docs/reference/networking/virtual-ips/#traffic-distribution for more details.
  trafficDistribution: ""
  # aws configures AWS Load Balancer Controller annotations
  # When enabled, service type is automatically set to LoadBalancer
  aws:
    # enabled controls whether to add AWS LB annotations
    enabled: false
    # scheme controls whether the load balancer is internet-facing or internal
    # Valid values: "internet-facing" or "internal"
    scheme: "internet-facing"
# nodeSelector allows scheduling pods on nodes with specific labels
nodeSelector: {}
# tolerations allows pods to be scheduled on nodes with matching taints
tolerations: []
# affinity and anti-affinity rules for pod scheduling
affinity: {}
# Example: spread pods across different nodes for HA
# affinity:
#   podAntiAffinity:
#     preferredDuringSchedulingIgnoredDuringExecution:
#       - weight: 100
#         podAffinityTerm:
#           labelSelector:
#             matchExpressions:
#               - key: app
#                 operator: In
#                 values:
#                   - pgdog
#           topologyKey: kubernetes.io/hostname
# topologySpreadConstraints controls how pods are spread across topology domains
# https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
topologySpreadConstraints: []
# Example:
# topologySpreadConstraints:
#   - maxSkew: 1
#     topologyKey: topology.kubernetes.io/zone
#     whenUnsatisfiable: DoNotSchedule
#     labelSelector:
#       matchLabels:
#         app: pgdog
# podAntiAffinity enables default anti-affinity rules to spread pods
# across nodes
# Set to true for automatic pod anti-affinity configuration
podAntiAffinity:
  enabled: true
  # type can be "soft" (preferred) or "hard" (required)
  type: soft
# podDisruptionBudget helps maintain availability during voluntary disruptions
podDisruptionBudget:
  enabled: true
  # minAvailable specifies the minimum number of pods that must be available
  # Can be an absolute number or a percentage (e.g., "50%")
  minAvailable: 1
  # maxUnavailable specifies the maximum number of pods that can be unavailable
  # Only one of minAvailable or maxUnavailable can be specified
  # maxUnavailable: 1
# serviceAccount configuration for RBAC
serviceAccount:
  # create specifies whether a ServiceAccount should be created
  create: true
  # annotations to add to the service account
  annotations: {}
  # name of the service account to use
  # (if not set and create is true, a name is generated)
  name: ""
# rbac configuration
rbac:
  # create specifies whether RBAC resources should be created
  create: true
# externalSecrets integration for secrets management
externalSecrets:
  # enabled controls whether to use ExternalSecrets instead of plain
  # Kubernetes secrets
  enabled: false
  # create controls whether to create an ExternalSecret resource or
  # use an existing one
  # Set to false if you want to reference an externally managed ExternalSecret
  create: true
  # name of the ExternalSecret to create or reference
  # (if empty, uses the chart fullname)
  name: ""
  # secretName is the name of the Kubernetes Secret that the
  # ExternalSecret creates/targets (if empty, uses the chart fullname)
  # Only needed when using an existing ExternalSecret with a custom
  # target secret name
  secretName: ""
  # refreshInterval defines how often to sync secrets from external
  # source (only used when create: true)
  refreshInterval: 1h
  # secretStoreRef references the SecretStore to use
  # (only used when create: true)
  secretStoreRef:
    name: ""
    kind: SecretStore
  # remoteRefs defines the external secret references
  # (only used when create: true)
  remoteRefs: []
  # Example structure:
  # - secretKey: users.toml
  #   remoteRef:
  #     key: pgdog/users
  #     property: users.toml
# ServiceMonitor for Prometheus metrics
serviceMonitor:
  enabled: false
# Grafana remote write configuration for Prometheus
grafanaRemoteWrite:
  # url is the Grafana remote write endpoint
  # When set, remote_write will be enabled automatically
  # Example: https://prometheus-prod-XX-XXX.grafana.net/api/prom/push
  url: ""
  # basicAuth credentials for Grafana Cloud
  basicAuth:
    # username is typically a numeric user ID for Grafana Cloud
    username: ""
    # password is the Grafana Cloud API key/token
    password: ""
  # queueConfig allows fine-tuning the remote write queue behavior
  queueConfig:
    # capacity is the number of samples to buffer per shard before blocking
    capacity: 10000
    # maxShards is the maximum number of shards (parallelism)
    maxShards: 50
    # minShards is the minimum number of shards
    minShards: 1
    # maxSamplesPerSend is the maximum number of samples per send
    maxSamplesPerSend: 5000
    # batchSendDeadline is the maximum time samples will wait in buffer
    batchSendDeadline: 5s
    # minBackoff is the initial retry delay
    minBackoff: 30ms
    # maxBackoff is the maximum retry delay
    maxBackoff: 5s
# Gateway configuration (Enterprise Edition)
# The gateway provides query stats collection and monitoring capabilities
# NOTE: Gateway runs as a single replica only
gateway:
  # enabled controls whether to deploy the gateway
  enabled: false
  # labels allows overriding gateway resource labels (defaults to app.kubernetes.io/* format)
  labels: {}
  # selectorLabels allows overriding gateway selector labels
  # WARNING: Changing selectors requires recreating Deployments
  selectorLabels: {}
  # image contains the Docker image properties for the gateway
  image:
    # repository is the Docker image repository
    repository: ghcr.io/pgdogdev/pgdog-enterprise/gateway
    # tag is the Docker image tag
    tag: "main-ent"
    # pullPolicy specifies when to use cached version of the image
    # (Always is appropriate here since "main-ent" is a mutable tag)
    pullPolicy: Always
    # name is the full image name (if set, takes precedence)
    # name: ghcr.io/pgdogdev/gateway:latest
  # port on which the gateway will run
  port: 8443
  # bindAddress is the address the gateway binds to
  bindAddress: "0.0.0.0"
  # resources define resource requests and limits for the gateway container
  resources:
    requests:
      cpu: 1000m
      memory: 1Gi
    limits:
      cpu: 1000m
      memory: 1Gi
  # tls configuration for the gateway
  tls:
    # existingSecret references an existing TLS secret (must contain tls.crt and tls.key)
    existingSecret: ""
    # certPath is the path to the certificate file (if not using existingSecret)
    # Defaults to snakeoil certificate if not specified
    certPath: ""
    # keyPath is the path to the private key file (if not using existingSecret)
    # Defaults to snakeoil key if not specified
    keyPath: ""
  # control configuration for the gateway
  control:
    # url is the WebSocket endpoint for control communication
    url: ""
    # token is the authentication token for control communication
    token: ""
  # stats configuration intervals (in milliseconds)
  stats:
    activeQueriesInterval: 1000
    pingInterval: 1000
    pushInterval: 1000
    configInterval: 1000
  # service contains the Kubernetes service configuration for gateway
  service:
    # type specifies the type of Kubernetes service
    type: ClusterIP
    # annotations allows adding custom annotations to the service
    annotations: {}
  # nodeSelector allows scheduling pods on nodes with specific labels
  nodeSelector: {}
  # tolerations allows pods to be scheduled on nodes with matching taints
  tolerations: []
  # affinity rules for pod scheduling
  affinity: {}
  # connectFromPgdog enables pgdog to connect to the gateway
  # When enabled, pgdog.toml will include a [gateway] section
  connectFromPgdog: false
# Query stats (PgDog EE)
queryStats:
  # enabled enables pgdog query stats (EE version only)
  enabled: false
  # queryPlanThreshold is the slow-query threshold for collecting plans (ms)
  queryPlanThreshold: 1000
  # queryPlanMaxAge is how long collected plans are kept (ms)
  queryPlanMaxAge: 15000
  # queryPlansCache is the number of query plans to cache
  queryPlansCache: 100
  # maxErrors is the number of errors to retain
  maxErrors: 100
  # maxErrorAge is how long retained errors are kept (ms)
  maxErrorAge: 300000
# Control configuration (PgDog EE)
# Enables communication with PgDog Cloud control plane
control:
  # enabled controls whether to include [control] section in pgdog.toml
  enabled: false
  # endpoint is the control plane URL
  endpoint: ""
  # token is the authentication token for the control plane
  token: ""
  # metricsInterval defines how often to push metrics (in milliseconds)
  metricsInterval: 1000
  # statsInterval defines how often to push stats (in milliseconds)
  statsInterval: 5000
  # activeQueriesInterval defines how often to push active queries (in milliseconds)
  activeQueriesInterval: 5000
  # errorsInterval defines how often to push errors (in milliseconds)
  errorsInterval: 5000
  # requestTimeout defines the timeout for control plane requests (in milliseconds)
  requestTimeout: 1000
  # queryTimingsChunkSize defines the chunk size for query timings
  queryTimingsChunkSize: 25
  # queryTimingsNewQueryQueueSize defines the queue size for new query timings
  queryTimingsNewQueryQueueSize: 1000
# LSN check configuration for replication failover auto mode (in milliseconds)
# See: https://docs.pgdog.dev/features/load-balancer/replication-failover/
# lsnCheckDelay: 0 # Set to 0 to start LSN monitoring immediately
# lsnCheckInterval: 1000 # How frequently to re-fetch replication status
# TCP keep-alive configuration (optional)
# These settings control socket-level keep-alive behavior.
# All time values are in milliseconds.
# tcpKeepalive: true
# tcpTime: 7200000 # 2 hours (Linux default: 7200s)
# tcpInterval: 75000 # 75 seconds (Linux default: 75s)
# tcpRetries: 9
# Memory configuration (optional)
# These settings control buffer and stack sizes
# memoryNetBuffer: 8192
# memoryMessageBuffer: 8192
# memoryStackSize: 2097152
# Query parser configuration
# queryParser controls whether the query parser is enabled
# Valid values: "auto", "on", "off"
# queryParser: "auto"
# queryParserEngine specifies which query parser engine to use
# queryParserEngine: ""
# queryParserEnabled is DEPRECATED - use queryParser instead
# queryParserEnabled: true
# Prometheus Collector configuration
# Deploys a standalone Prometheus instance that collects metrics from all pgdog pods
prometheusCollector:
  # enabled controls whether to deploy the Prometheus collector
  enabled: false
  # labels allows overriding prometheus-collector resource labels
  labels: {}
  # selectorLabels allows overriding prometheus-collector selector labels
  selectorLabels: {}
  # podAnnotations allows adding custom annotations to the prometheus-collector pods
  podAnnotations: {}
  # image contains the Docker image properties for Prometheus
  image:
    repository: prom/prometheus
    tag: latest
    # Always is required with the mutable "latest" tag: IfNotPresent would
    # keep serving whatever image a node already cached and never pick up
    # upstream updates (Kubernetes itself defaults :latest to Always).
    pullPolicy: Always
  # port on which Prometheus will expose metrics
  port: 9090
  # scrapeInterval defines how often to scrape targets
  scrapeInterval: 15s
  # evaluationInterval defines how often to evaluate rules
  evaluationInterval: 15s
  # resources define resource requests and limits for the Prometheus container
  resources:
    requests:
      cpu: 200m
      memory: 256Mi
    limits:
      cpu: 500m
      memory: 512Mi
  # storage configuration for Prometheus TSDB
  storage:
    # size is the maximum size of the emptyDir volume
    size: 10Gi
  # retention configuration for Prometheus TSDB
  retention:
    # time is the maximum duration to keep data (e.g., 15d, 6h)
    time: 15d
    # size is the maximum size of data to retain (e.g., 5GB, 500MB)
    # Prometheus will delete oldest data first when this limit is exceeded
    size: 5GB
  # tls configuration for enabling HTTPS on the Prometheus endpoint
  tls:
    # enabled controls whether TLS is enabled for Prometheus
    # When enabled, a self-signed certificate is automatically generated
    enabled: false
  # basicAuth configuration for protecting the Prometheus endpoint
  basicAuth:
    # enabled controls whether basic auth is required to access Prometheus
    enabled: false
    # username for basic auth
    username: ""
    # password is the plaintext password (used for health checks)
    password: ""
    # passwordHash is the bcrypt hash of the password
    # Generate with: htpasswd -nBC 10 "" | tr -d ':\n'
    # Or use Python: python -c "import bcrypt; print(bcrypt.hashpw(b'password', bcrypt.gensalt()).decode())"
    passwordHash: ""
  # service contains the Kubernetes service configuration
  service:
    # type specifies the type of Kubernetes service (ignored when aws.enabled is true)
    type: ClusterIP
    # annotations allows adding custom annotations to the service
    annotations: {}
    # aws configures AWS Load Balancer Controller annotations
    # When enabled, service type is automatically set to LoadBalancer
    aws:
      # enabled controls whether to add AWS LB annotations
      enabled: false
      # scheme controls whether the load balancer is internet-facing or internal
      # Valid values: "internet-facing" or "internal"
      scheme: "internet-facing"
  # nodeSelector allows scheduling pods on nodes with specific labels
  nodeSelector: {}
  # tolerations allows pods to be scheduled on nodes with matching taints
  tolerations: []
  # affinity rules for pod scheduling
  affinity: {}