forked from DreamLab-AI/origin-logseq-AR
-
Notifications
You must be signed in to change notification settings - Fork 19
Expand file tree
/
Copy pathDockerfile.unified
More file actions
390 lines (329 loc) · 15.5 KB
/
Dockerfile.unified
File metadata and controls
390 lines (329 loc) · 15.5 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
# =============================================================================
# VisionFlow Unified Dockerfile - Multi-Stage Build (CachyOS)
# =============================================================================
# This Dockerfile provides a unified build process for both development and
# production environments using multi-stage builds and build arguments.
#
# Build Targets:
# - development: Full toolchain with hot-reload, debugging tools
# - production: Optimized, minimal runtime with security hardening
#
# Build Arguments:
# BUILD_TARGET: development|production (default: development)
# CUDA_ARCH: CUDA compute capability (default: 75 for portable PTX)
#
# Usage Examples:
# Development: docker build --target development -t visionflow:dev .
# Production: docker build --target production -t visionflow:prod .
# Custom CUDA: docker build --build-arg CUDA_ARCH=75 --target production .
# =============================================================================
# =============================================================================
# STAGE 1: Base - Common foundation for all targets
# =============================================================================
# SECURITY: Pin to digest for production builds (e.g., cachyos/cachyos-v3@sha256:abc123...)
FROM cachyos/cachyos-v3:latest AS base
# Build arguments
ARG CUDA_ARCH=75
ARG BUILD_TARGET=development
# Environment variables common to all stages
# CUDA_ARCH is promoted from ARG to ENV so child stages inherit it for nvcc
# FIX: LD_LIBRARY_PATH is not defined anywhere earlier in this Dockerfile, so
# "/opt/cuda/lib64:${LD_LIBRARY_PATH}" expanded with a trailing ":" - an empty
# path entry makes the dynamic linker also search the current working
# directory, a subtle security hazard. Set the plain path instead.
# NOTE(review): if the base image itself ever exports LD_LIBRARY_PATH this
# drops it - confirm (it appears unset in cachyos-v3).
# NOTE(review): RUST_LOG is not declared as an ARG, so ${RUST_LOG:-warn}
# always resolves to "warn" at build time; it stays overridable at run time.
ENV RUST_LOG=${RUST_LOG:-warn} \
    NVIDIA_VISIBLE_DEVICES=all \
    NVIDIA_DRIVER_CAPABILITIES=all \
    CUDA_HOME=/opt/cuda \
    CUDA_PATH=/opt/cuda \
    CUDA_ARCH=${CUDA_ARCH} \
    LD_LIBRARY_PATH="/opt/cuda/lib64" \
    PATH="/root/.cargo/bin:/opt/cuda/bin:${PATH}"
# Initialize pacman keyring (required for CachyOS signature verification)
# Steps: create the local keyring, trust the Arch/CachyOS signing keys,
# refresh both keyring packages, bring the system fully up to date, and
# drop the package cache so it does not bloat this layer.
RUN set -e; \
    pacman-key --init; \
    pacman-key --populate archlinux cachyos; \
    pacman -Sy --noconfirm archlinux-keyring cachyos-keyring; \
    pacman -Syu --noconfirm; \
    rm -rf /var/cache/pacman/pkg/*
# Install base system dependencies (with retry logic for transient failures)
# FIX: a plain `for` loop exits with the status of its LAST command - here the
# retry handler - so three failed attempts still let the build continue with
# packages missing. Track success explicitly and fail the layer hard.
RUN ok=0 && for attempt in 1 2 3; do \
    echo "=== Package install attempt $attempt/3 ===" && \
    pacman -S --noconfirm --needed \
    # Core build tools \
    curl \
    git \
    # Compilers (CachyOS ships modern GCC, no version pinning needed) \
    gcc \
    base-devel \
    # Libraries (openssl included in base, pkgconf in base-devel) \
    openssl \
    # Network utilities \
    openbsd-netcat \
    lsof \
    # Utilities \
    expect \
    && { ok=1; break; } || { echo "Attempt $attempt failed, retrying in 10s..."; sleep 10; pacman -Syy --noconfirm; }; \
    done && \
    [ "$ok" = 1 ] || { echo "ERROR: base package install failed after 3 attempts"; exit 1; } && \
    rm -rf /var/cache/pacman/pkg/*
# Install CUDA via pacman (CachyOS installs to /opt/cuda)
# --overwrite needed because base image ships nvidia-opencl libs that conflict
# FIX: as with the base packages, the original retry loop swallowed total
# failure (the loop's exit status was the retry handler's). Fail the layer if
# CUDA never installs. The nvcc/libcudart listings below stay informational
# only: the next RUN has a filesystem-search fallback for nvcc.
RUN ok=0 && for attempt in 1 2 3; do \
    echo "=== CUDA install attempt $attempt/3 ===" && \
    pacman -S --noconfirm --needed --overwrite '/usr/lib/libnvidia-opencl*' cuda \
    && { ok=1; break; } || { echo "Attempt $attempt failed, retrying in 10s..."; sleep 10; pacman -Syy --noconfirm; }; \
    done && \
    [ "$ok" = 1 ] || { echo "ERROR: CUDA install failed after 3 attempts"; exit 1; } && \
    rm -rf /var/cache/pacman/pkg/* && \
    echo "=== CUDA install verification ===" && \
    ls -la /opt/cuda/bin/nvcc 2>/dev/null && echo "CUDA nvcc found" || echo "nvcc NOT found" && \
    ls -la /opt/cuda/lib64/libcudart.so* 2>/dev/null | head -3 || echo "libcudart NOT found"
# Verify and setup CUDA installation path
# find_cuda_helper checks multiple standard CUDA locations
# Ensure /usr/local/cuda also points to the correct CUDA installation
# Flow: (1) if pacman did not create /opt/cuda, search the filesystem for an
# nvcc binary and symlink its tree into /opt/cuda; (2) mirror /opt/cuda to
# /usr/local/cuda, which find_cuda_helper prefers; (3) print the final
# locations. Every check here is informational only - this RUN never fails
# the build if nvcc is missing (a later cargo build would fail instead).
# NOTE(review): the $(... | xargs dirname | xargs dirname) chain errors on
# empty input, but the error stays inside the command substitution, so
# CUDA_ROOT simply ends up empty and the -n/-d guard below handles it.
RUN echo "=== CUDA Installation Check ===" && \
    if [ ! -d /opt/cuda ]; then \
    echo "Looking for CUDA installation..."; \
    CUDA_ROOT=$(find / -maxdepth 3 -name "nvcc" -type f 2>/dev/null | head -1 | xargs dirname | xargs dirname); \
    if [ -n "$CUDA_ROOT" ] && [ -d "$CUDA_ROOT" ]; then \
    echo "Found CUDA at: $CUDA_ROOT"; \
    mkdir -p /opt/cuda && ln -sf "$CUDA_ROOT"/* /opt/cuda/ 2>/dev/null || true; \
    fi; \
    fi && \
    # Also link to /usr/local/cuda which find_cuda_helper prefers
    if [ -d /opt/cuda ]; then \
    ln -sf /opt/cuda /usr/local/cuda 2>/dev/null || true; \
    fi && \
    # Verify CUDA is findable
    echo "Verifying CUDA locations:" && \
    ls -lh /opt/cuda/bin/nvcc 2>/dev/null && echo "✓ /opt/cuda/bin/nvcc found" || echo "✗ /opt/cuda/bin/nvcc not found" && \
    ls -lh /usr/local/cuda/bin/nvcc 2>/dev/null && echo "✓ /usr/local/cuda/bin/nvcc found" || echo "✗ /usr/local/cuda/bin/nvcc not found" && \
    echo "CUDA path setup complete"
# Configure environment for CUDA build tools
# The find_cuda_helper crate checks CUDA_PATH, CUDA_HOME, /opt/cuda, and /usr/local/cuda
# Set these explicitly so cargo build scripts can find CUDA
# FIX: LIBRARY_PATH and CPATH are not defined earlier in this Dockerfile, so
# appending ":${VAR}" produced values with a trailing ":" - gcc treats an
# empty entry as the current working directory for library/header search.
# Set the plain paths instead (nothing upstream defines either variable).
ENV CUDA_PATH="/opt/cuda" \
    CUDA_HOME="/opt/cuda" \
    CUDA_INCLUDE_PATH="/opt/cuda/include" \
    CUDA_LIB_PATH="/opt/cuda/lib64" \
    LIBRARY_PATH="/opt/cuda/lib64" \
    CPATH="/opt/cuda/include" \
    CFLAGS="-I/opt/cuda/include" \
    LDFLAGS="-L/opt/cuda/lib64"
# Install Rust toolchain (stable channel)
# --proto '=https' --tlsv1.2 forbids protocol/TLS downgrade of the installer.
# NOTE(review): this still pipes a remote script into sh with no checksum,
# and "stable" is a moving target - consider pinning an exact toolchain
# version (e.g. --default-toolchain 1.80.0) for reproducible builds.
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | \
    sh -s -- -y --default-toolchain stable --profile minimal
# Install Node.js 20.x LTS via tarball
# FIX: the original piped curl straight into tar, so under plain /bin/sh a
# truncated or failed download could be silently extracted (the pipe hides
# curl's exit status - hadolint DL4006) and nothing verified integrity.
# Download to a file, check it against the SHASUMS256.txt published for the
# release, then extract and clean up in the same layer.
RUN cd /tmp && \
    curl -fsSLO https://nodejs.org/dist/v20.18.3/node-v20.18.3-linux-x64.tar.xz && \
    curl -fsSL https://nodejs.org/dist/v20.18.3/SHASUMS256.txt | \
    grep ' node-v20.18.3-linux-x64.tar.xz' | \
    sha256sum -c - && \
    tar -xJf node-v20.18.3-linux-x64.tar.xz -C /usr/local --strip-components=1 && \
    rm -f node-v20.18.3-linux-x64.tar.xz
WORKDIR /app
# =============================================================================
# STAGE 2: Rust Dependencies - Cache Rust dependency compilation
# =============================================================================
FROM base AS rust-deps
# Copy only dependency manifests first for better layer caching
# (build.rs is needed too: it drives the PTX compilation of the .cu sources)
COPY Cargo.toml build.rs ./
# NOTE(review): the glob presumably tolerates an uncommitted lockfile -
# confirm the builder in use accepts a wildcard COPY with zero matches
COPY Cargo.lock* ./
# whelk-rs is copied whole - presumably a local path dependency; confirm
# against Cargo.toml
COPY whelk-rs ./whelk-rs
# Copy CUDA source files needed by build.rs for PTX compilation
COPY src/utils/*.cu src/utils/
# Create dummy Rust source to build dependencies without real code
# Must create placeholders for all [[bin]] targets in Cargo.toml
# Every placeholder except lib.rs gets the same empty main(), so they are
# generated in one loop; lib.rs needs a library-style stub instead.
RUN mkdir -p src/bin examples && \
    for stub in src/main.rs \
                src/bin/generate_types.rs \
                src/bin/sync_local.rs \
                src/bin/sync_github.rs \
                examples/constraint_integration_debug.rs \
                examples/metadata_debug.rs \
                examples/ontology_constraints_example.rs \
                examples/ontology_validation_example.rs; do \
        echo "fn main() {}" > "$stub"; \
    done && \
    echo "pub fn lib() {}" > src/lib.rs
# Fetch and build dependencies (this layer is cached unless deps change)
# The CUDA locations are exported inline so cargo build scripts see them
# even independently of the stage-level ENV; the dummy src tree is removed
# afterwards so Stage 3 can lay down the real sources.
RUN export CUDA_PATH=/opt/cuda CUDA_HOME=/opt/cuda \
           CUDA_INCLUDE_PATH=/opt/cuda/include CUDA_LIB_PATH=/opt/cuda/lib64 \
           LD_LIBRARY_PATH="/opt/cuda/lib64:${LD_LIBRARY_PATH}" && \
    cargo fetch && \
    cargo build --release --features gpu && \
    rm -rf src
# =============================================================================
# STAGE 3: Rust Builder - Compile Rust backend with GPU support
# =============================================================================
# Builds on rust-deps so the dependency-compilation layer is reused; only the
# application crate itself is recompiled here.
FROM rust-deps AS rust-builder
# Copy actual source code
COPY src ./src
COPY data/schema ./data/schema
# Force cargo to recompile: Docker COPY preserves host mtimes which may be
# older than Stage 2 build artifacts, causing cargo to skip recompilation
# (build.rs is touched as well, presumably so its PTX step is re-evaluated)
RUN find src -name '*.rs' -exec touch {} + && touch build.rs
# Build Rust backend with GPU features
# This compiles the actual application code; the binary is stripped to
# shrink the final image and then listed to verify it actually exists.
RUN export CUDA_PATH=/opt/cuda CUDA_HOME=/opt/cuda \
           CUDA_INCLUDE_PATH=/opt/cuda/include CUDA_LIB_PATH=/opt/cuda/lib64 \
           LD_LIBRARY_PATH="/opt/cuda/lib64:${LD_LIBRARY_PATH}" && \
    cargo build --release --features gpu && \
    strip target/release/webxr && \
    ls -lh target/release/webxr
# =============================================================================
# STAGE 4: Node Dependencies - Install and cache Node.js dependencies
# =============================================================================
FROM base AS node-deps
WORKDIR /app/client
# Copy only package files and scripts for better layer caching
# (client/scripts is copied too - presumably referenced by npm lifecycle
# scripts during install; confirm against client/package.json)
COPY client/package*.json ./
COPY client/scripts ./scripts
# Install Node.js dependencies
# --production flag is added later in production stage
# DOCKER_BUILD skips security check for test packages (not used in prod)
# NOTE: this ENV persists only in stages derived from node-deps; neither
# final target (development, production) is FROM node-deps, so it never
# reaches a runtime image.
ENV DOCKER_BUILD=1
RUN npm ci --prefer-offline --no-audit
# =============================================================================
# STAGE 5: Node Builder - Build frontend assets (production only)
# =============================================================================
FROM node-deps AS node-builder
# Copy frontend source code (WORKDIR is still /app/client from node-deps)
# NOTE(review): this lands on top of the node_modules installed in the
# previous stage - .dockerignore must exclude client/node_modules (and any
# build output) or a host copy would clobber the stage-built modules; confirm.
COPY client ./
# Build optimized production bundle
# Call vite directly: npm's prebuild/build scripts run types:generate which
# requires Cargo (available in rust-builder, not here). Generated types are
# committed to source control, so vite can compile without regenerating them.
RUN npx vite build && \
# Verify build output
ls -lh dist/
# =============================================================================
# STAGE 6: Development Target - Full development environment
# =============================================================================
FROM base AS development
# Install development-specific tools (with retry logic)
# FIX: the original `for` loop returned the status of its last command (the
# retry handler), so three failed attempts still let the build continue with
# the tools missing. Record success explicitly and fail the layer otherwise.
RUN ok=0 && for attempt in 1 2 3; do \
    echo "=== Dev tools install attempt $attempt/3 ===" && \
    pacman -S --noconfirm --needed \
    docker \
    nginx \
    vim \
    python python-pip \
    && { ok=1; break; } || { echo "Attempt $attempt failed, retrying in 10s..."; sleep 10; pacman -Syy --noconfirm; }; \
    done && \
    [ "$ok" = 1 ] || { echo "ERROR: dev tools install failed after 3 attempts"; exit 1; } && \
    rm -rf /var/cache/pacman/pkg/*
# Install supervisor via pip (not available in pacman)
# --break-system-packages is required on Arch's externally-managed Python.
# NOTE(review): unpinned - consider `supervisor==<x.y.z>` for reproducibility.
RUN pip install --break-system-packages supervisor
# Create Nginx directories
# (chown to http:http - presumably nginx's worker user on Arch; matches the
# production stage's ownership scheme)
RUN mkdir -p /var/log/nginx /var/run/nginx && \
chown -R http:http /var/run/nginx
# Create application directories
# /app/target is pre-created so the dev entrypoint's rebuild has a stable
# artifact location - presumably bind/volume-mounted in dev; confirm.
RUN mkdir -p \
/app/user_settings \
/app/client \
/app/logs \
/app/scripts \
/app/target
# Copy source code for hot-reload development
COPY Cargo.toml build.rs ./
COPY Cargo.lock* ./
COPY src ./src
COPY data/schema ./data/schema
COPY whelk-rs ./whelk-rs
# Copy client source (not built, will use dev server)
# NOTE(review): relies on .dockerignore excluding client/node_modules so the
# stage-built modules copied below are not shadowed by a host copy - confirm
COPY client ./client
WORKDIR /app/client
# Copy node_modules from node-deps stage
COPY --from=node-deps /app/client/node_modules ./node_modules
WORKDIR /app
# Pre-fetch Rust dependencies for faster rebuilds
RUN cargo fetch
# Copy development configuration files
COPY nginx.dev.conf /etc/nginx/nginx.conf
COPY data/settings.yaml /app/settings.yaml
COPY supervisord.dev.conf ./supervisord.dev.conf
# Copy entrypoint scripts
# NOTE(review): with BuildKit these COPYs could take --chmod=755 and the
# separate chmod RUN layer below could be dropped
COPY scripts/dev-entrypoint.sh ./
COPY scripts/rust-backend-wrapper.sh ./scripts/
RUN chmod +x ./dev-entrypoint.sh ./scripts/rust-backend-wrapper.sh
# Development environment variables
ENV NODE_ENV=development \
RUST_LOG=debug \
DOCKER_ENV=1 \
VITE_DEV_SERVER_PORT=5173 \
VITE_API_PORT=4000 \
VITE_HMR_PORT=24678 \
SYSTEM_NETWORK_PORT=4000
# Expose development ports
# 3001: Nginx entry point
# 4000: Rust backend API (direct access)
# 5173: Vite dev server (proxied via Nginx)
# 24678: Vite HMR websocket (proxied via Nginx)
EXPOSE 3001 4000 5173 24678
# Development entrypoint rebuilds Rust on startup for code changes
# NOTE(review): no USER directive here - the dev container runs as root,
# presumably so the entrypoint can rebuild and manage docker/nginx; this
# stage must never be promoted to production use.
ENTRYPOINT ["./dev-entrypoint.sh"]
# =============================================================================
# STAGE 7: Production Target - Optimized runtime environment
# =============================================================================
# SECURITY: Pin to digest for production builds (e.g., cachyos/cachyos-v3@sha256:abc123...)
# This stage starts from the raw base image rather than the `base` stage so
# none of the build toolchain (gcc, base-devel, full CUDA SDK, rustup, Node)
# ever reaches the runtime image; artifacts are copied in further below.
FROM cachyos/cachyos-v3:latest AS production
# Initialize pacman keyring for production stage
# (required before any pacman -S so package signatures can be verified)
RUN pacman-key --init && \
pacman-key --populate archlinux cachyos && \
pacman -Sy --noconfirm archlinux-keyring cachyos-keyring && \
pacman -Syu --noconfirm && \
rm -rf /var/cache/pacman/pkg/*
# Install only runtime dependencies (no base-devel, no cuda dev headers)
# FIX: as in the earlier stages, the original retry loop's exit status was
# that of the retry handler, so total failure did not fail the build. Track
# success explicitly and abort the layer when the install never succeeds.
RUN ok=0 && for attempt in 1 2 3; do \
    echo "=== Runtime packages install attempt $attempt/3 ===" && \
    pacman -S --noconfirm --needed \
    openssl \
    nginx \
    curl \
    python python-pip \
    && { ok=1; break; } || { echo "Attempt $attempt failed, retrying in 10s..."; sleep 10; pacman -Syy --noconfirm; }; \
    done && \
    [ "$ok" = 1 ] || { echo "ERROR: runtime package install failed after 3 attempts"; exit 1; } && \
    rm -rf /var/cache/pacman/pkg/*
# Install supervisor via pip (not available in pacman)
# NOTE(review): unpinned - consider `supervisor==<x.y.z>` for reproducibility.
RUN pip install --break-system-packages supervisor
# Create non-root user for security
RUN useradd -m -u 1000 -s /bin/bash appuser && \
    # Create Nginx directories
    mkdir -p /var/log/nginx /var/run/nginx && \
    chown -R http:http /var/run/nginx /var/log/nginx
# NOTE(review): the nginx dirs are owned by http:http while the container
# runs as appuser (see USER below) - confirm supervisord.production.conf
# starts nginx as a user that can actually write these directories.
# Copy CUDA runtime libraries (only runtime, not development files)
COPY --from=base /opt/cuda/lib64/libcudart.so* /opt/cuda/lib64/
COPY --from=base /opt/cuda/lib64/libnvrtc.so* /opt/cuda/lib64/
# FIX: LD_LIBRARY_PATH is not defined earlier in this stage, so appending
# ":${LD_LIBRARY_PATH}" produced a trailing ":" - an empty entry makes the
# dynamic linker also search the current working directory. Use the plain
# path instead.
ENV CUDA_HOME=/opt/cuda \
    LD_LIBRARY_PATH="/opt/cuda/lib64" \
    NVIDIA_VISIBLE_DEVICES=all \
    NVIDIA_DRIVER_CAPABILITIES=compute,utility
WORKDIR /app
# Create application directories with proper ownership
# (the recursive chown ensures the non-root appuser can write settings,
# logs, and markdown/metadata data at runtime)
RUN mkdir -p \
/app/data \
/app/data/markdown \
/app/data/metadata \
/app/user_settings \
/app/logs \
/app/client/dist \
&& chown -R appuser:appuser /app
# Copy compiled Rust binary from builder
COPY --from=rust-builder --chown=appuser:appuser /app/target/release/webxr ./webxr
# Copy built frontend assets from node-builder
COPY --from=node-builder --chown=appuser:appuser /app/client/dist ./client/dist
# Copy production configuration
COPY --chown=appuser:appuser nginx.production.conf /etc/nginx/nginx.conf
COPY --chown=appuser:appuser data/settings.yaml ./settings.yaml
COPY --chown=appuser:appuser supervisord.production.conf ./supervisord.production.conf
# Copy production entrypoint
# NOTE(review): with BuildKit, COPY --chmod=755 would make the chmod RUN
# layer below unnecessary
COPY --chown=appuser:appuser scripts/prod-entrypoint.sh ./
RUN chmod +x ./prod-entrypoint.sh
# Production environment variables
ENV NODE_ENV=production \
RUST_LOG=warn \
DOCKER_ENV=1 \
SYSTEM_NETWORK_PORT=4000
# Switch to non-root user for security
# (all root-requiring steps - package install, useradd, chown - are done)
USER appuser
# Expose only production port (Nginx entry point)
# 4000 is an unprivileged port, so binding works for non-root appuser
EXPOSE 4000
# Health check for production readiness
# curl is installed in this stage's runtime packages; -f makes curl exit
# non-zero on HTTP >= 400 so the container is flagged unhealthy
HEALTHCHECK --interval=30s --timeout=10s --retries=3 --start-period=40s \
CMD curl -f http://localhost:4000/api/health || exit 1
# Production entrypoint uses pre-built binaries
ENTRYPOINT ["./prod-entrypoint.sh"]
# =============================================================================
# Default target selection based on BUILD_TARGET argument
# =============================================================================
# Note: Use --target flag to explicitly select build stage
# =============================================================================