diff --git a/.github/workflows/build-and-release.yml b/.github/workflows/build-and-release.yml index 6fd1749b..f10da0f7 100644 --- a/.github/workflows/build-and-release.yml +++ b/.github/workflows/build-and-release.yml @@ -1,52 +1,34 @@ name: Olake UI Build And Release + on: - workflow_call: + workflow_dispatch: inputs: - environment: - description: "Environment to build (master, staging, dev)" - required: true - default: "" - type: string version: description: "Version to release" required: true - default: "" - type: string - workflow_dispatch: + workflow_call: inputs: - environment: - description: "Environment to build (master, staging, dev)" - required: true - default: "dev" - type: choice - options: - - master - - staging - - dev version: description: "Version to release" required: true - default: "v0.0.0.dev" type: string + default: "" jobs: build_and_publish_frontend: - name: Build and publish frontend image for ${{ inputs.environment }} - environment: ${{ inputs.environment }} + name: Build and publish frontend image runs-on: ubuntu-latest + environment: Build UI + env: DOCKER_LOGIN: ${{ secrets.DOCKER_USERNAME }} DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }} DOCKER_REPO: ${{ secrets.DOCKER_REPO || 'olakego' }} - DOCKER_REPO_WORKER: ${{ secrets.DOCKER_REPO_WORKER || 'olakego' }} - ENVIRONMENT: ${{ inputs.environment }} - VERSION: ${{ inputs.version }} + VERSION: ${{ inputs.version || github.event.inputs.version || 'v0.0.0.dev' }} steps: - name: Checkout code uses: actions/checkout@v3 - with: - ref: ${{ inputs.environment == 'master' && 'master' || (inputs.environment == 'staging' && 'staging' || inputs.environment == 'dev' && 'ci/workerReleaseIssues' || 'develop') }} - name: Set up Node.js uses: actions/setup-node@v3 @@ -66,9 +48,8 @@ jobs: - name: Setup environment variables run: | - echo "ENVIRONMENT=${{ env.ENVIRONMENT }}" >> $GITHUB_ENV echo "VERSION=${{ env.VERSION }}" >> $GITHUB_ENV - echo "Building frontend application for $ENVIRONMENT with version $VERSION" + echo "Building frontend for branch $GITHUB_REF_NAME with version $VERSION" - name: Run Release tool run: | diff --git a/.github/workflows/integration-test.yml b/.github/workflows/integration-test.yml new file mode 100644 index 00000000..12da8a0e --- /dev/null +++ b/.github/workflows/integration-test.yml @@ -0,0 +1,29 @@ +name: Integration Tests +on: + push: + branches: + - master + pull_request: + branches: + - "*" + +jobs: + integration-tests: + runs-on: 32gb-runner + timeout-minutes: 30 + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Set up Go + uses: actions/setup-go@v4 + with: + go-version: '1.23.2' + + - name: Download Go dependencies + working-directory: ./server + run: go mod download + + - name: Run Docker In Docker Container Tests + working-directory: ./server + run: go test -v ./tests -timeout 0 -run 'TestDinDIntegration' \ No newline at end of file diff --git a/.github/workflows/security-ci.yaml b/.github/workflows/security-ci.yaml index cd6d09f6..7217d6f5 100644 --- a/.github/workflows/security-ci.yaml +++ b/.github/workflows/security-ci.yaml @@ -3,11 +3,9 @@ on: push: branches: - "master" - - "feat/bff-api" pull_request: branches: - "*" - - "feat/bff-api" workflow_dispatch: inputs: logLevel: @@ -15,6 +13,7 @@ on: required: true default: "warning" + jobs: govulncheck: name: govulncheck @@ -23,13 +22,13 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - check-latest: "true" - go-version: "1.24.x" + go-version: "1.24.x" # Exact version for stability - name: 
Install govulncheck run: go install golang.org/x/vuln/cmd/govulncheck@latest - name: Run vulnerability checks working-directory: ./server run: govulncheck ./... + gosec: name: GoSec Security Scanner runs-on: ubuntu-latest @@ -37,8 +36,7 @@ jobs: - uses: actions/checkout@v3 - uses: actions/setup-go@v3 with: - check-latest: "true" - go-version: "1.24.x" + go-version: "1.24.x" # Removed check-latest to prevent upgrade to 1.25.x - name: install gosec run: curl -sfL https://raw.githubusercontent.com/securego/gosec/master/install.sh | sh -s -- -b $(go env GOPATH)/bin - name: Run Gosec Security Scanner diff --git a/.gitignore b/.gitignore index 382c9961..2f3152b9 100644 --- a/.gitignore +++ b/.gitignore @@ -45,3 +45,11 @@ npm-debug* # ---------------------------- *.env *.env.* + +# ---------------------------- +# Test Files +# ---------------------------- +ui/test-results/ +ui/playwright-report/ +ui/blob-report/ +ui/tests/.auth/ diff --git a/Dockerfile b/Dockerfile index b1d1931e..4d8fcfd9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,6 +1,7 @@ # Stage 1: Go Builder (Backend) FROM golang:1.24.2-alpine AS go-builder + # Install git, as it might be needed by go mod download or go build RUN apk add --no-cache git @@ -19,6 +20,8 @@ RUN cd server && go build -ldflags="-w -s" -o /app/olake-server . # Stage 2: Frontend Builder FROM node:20-alpine AS node-builder + +# Reuse build-time arguments during UI build if needed WORKDIR /app/ui # Install pnpm globally diff --git a/Makefile b/Makefile index dd076e99..78863004 100644 --- a/Makefile +++ b/Makefile @@ -51,10 +51,11 @@ BACKEND_ENV_VARS = \ OLAKE_POSTGRES_PORT=5432 \ OLAKE_POSTGRES_DBNAME=postgres \ OLAKE_POSTGRES_SSLMODE=disable \ - LOGS_DIR=./logger/logs \ + LOGS_DIR=$(PWD)/logger/logs \ SESSION_ON=true \ TEMPORAL_ADDRESS=localhost:7233 \ - CONTAINER_REGISTRY_BASE=registry-1.docker.io + CONTAINER_REGISTRY_BASE=registry-1.docker.io \ + PERSISTENT_DIR=$(PWD)/olake-config # Frontend environment variables FRONTEND_ENV_VARS = \ diff --git a/api-contract.md b/api-contract.md index d703fc08..a9a0fbd3 100644 --- a/api-contract.md +++ b/api-contract.md @@ -3,9 +3,8 @@ ### For now use olake as project id, later on it can be used to make multitenant system ## Base URL - ``` -http://localhost:8000 +http://localhost:8080 ``` ## Authentication @@ -41,7 +40,7 @@ http://localhost:8000 - **Request Body**: ```json { - "email": "string", + "email":"string", "username": "string", "password": "string" } @@ -53,21 +52,20 @@ http://localhost:8000 "success": "boolean", "message": "string", "data": { - "email": "string", - "username": "string" + "email":"string", + "username": "string", } } ``` ### Check Authentication - - **Endpoint**: `/auth` - **Method**: GET - **Description**: Verify if user is authenticated - **Headers**: `Authorization: Bearer ` // we are using cookie currently so frontend take care accordingly - **Response**: ```json - { + { "success": "boolean", "message": "string", "data": { @@ -77,7 +75,28 @@ http://localhost:8000 ``` ## Sources +### Get All Version Of Source +- **Endpoint**: `/api/v1/project/:projectid/sources/versions` +- **Method**: GET +- **Description**: Give spec based on source type +- **Headers**: `Authorization: Bearer ` +- **Request Body**: + ```json + { + "type":"string", + } + ``` +- **Response**: + ```json + { + "success": "boolean", + "message": "string", + "data": { + "version":["string","string"] + } + } + ``` ### Get Spec Of Source - **Endpoint**: `/api/v1/project/:projectid/sources/spec` @@ -87,8 +106,8 @@ http://localhost:8000 - 
**Request Body**: ```json { - "type": "string", - "version": "string" + "type":"string", + "version": "string", } ``` - **Response**: @@ -118,8 +137,8 @@ http://localhost:8000 ```json { - "type": "string", - "version": "string", + "type":"string", + "version":"string", "config": "json" } ``` @@ -127,15 +146,17 @@ http://localhost:8000 - **Response**: ```json - { - "success": "boolean", - "message": "string", - "data": { - "type": "string", - "version": "string", - "config": "json" - } +{ + "success": boolean, + "message": "string", + "data": { + "connection_result": { + "message": "string", + "status": "string" + }, + "logs": "json" } +} ``` ### Create Source @@ -147,9 +168,9 @@ http://localhost:8000 - **Request Body**: ```json { - "name": "string", // we have to make sure in database that it must also unique according to project id (for doubt let us discuss) - "type": "string", - "version": "string", // this field need to be shown on frontend as well, we discussed at time of design as well + "name": "string", // we have to make sure in database that it must also unique according to project id (for doubt let us discuss) + "type": "string", + "version":"string", // this field need to be shown on frontend as well, we discussed at time of design as well "config": "json" } ``` @@ -158,13 +179,12 @@ http://localhost:8000 { "success": "boolean", "message": "string", - "data": { - // whatever received send back - "name": "string", - "type": "string", - "version": "string", - "config": "json" - } + "data": { // whatever received send back + "name": "string", + "type": "string", + "version":"string", + "config": "json" + } } ``` @@ -203,7 +223,6 @@ http://localhost:8000 } ] } - ``` ### Update Source @@ -214,9 +233,9 @@ http://localhost:8000 - **Request Body**: ```json { - "name": "string", - "type": "string", - "version": "string", + "name": "string", + "type": "string", + "version":"string", "config": "json" } ``` @@ -225,39 +244,57 @@ http://localhost:8000 { "success": "boolean", "message": "string", - "data": { - // send same back - "name": "string", - "type": "string", - "version": "string", + "data": { // send same back + "name": "string", + "type": "string", + "version":"string", "config": "json" } } ``` -### Delete Source +### Delete Source - **Endpoint**: `/api/v1/project/:projectid/sources/:id` - **Method**: DELETE - **Description**: Delete a source - **Headers**: `Authorization: Bearer ` - **Response**: - ```json -{ - // Note: it is soft delete not hard delete - "success": "boolean", - "message": "string", - "data": { - "name": "string" // name of source deleted + { // Note: it is soft delete not hard delete + "success": "boolean", + "message": "string", + "data": { + "name" :"string", // name of source deleted + } } -} ``` + ## Destinations +### Get All Version Of Destinations +- **Endpoint**: `/api/v1/project/:projectid/destinations/versions` +- **Method**: GET +- **Description**: Give spec based on source type +- **Headers**: `Authorization: Bearer ` +- **Request Body**: + ```json + { + "type":"string", + } + ``` +- **Response**: + ```json + { + "success": "boolean", + "message": "string", + "data": { + "version":["string","string"] + } + } + ``` ### Destination Spec - - **Endpoint**: `/api/v1/project/:projectid/destinations/spec` - **Method**: GET - **Description**: Give spec based on destination type @@ -265,13 +302,13 @@ http://localhost:8000 - **Request Body**: ```json { - "type": "string", - "version": "string" + "type":"string", + "version": "string", } ``` - **Response**: 
```json - { + { "success": "boolean", "message": "string", "data": { @@ -287,7 +324,7 @@ http://localhost:8000 // currently this is not avaialable in olake will build this -### Test Destination +### Test Destination - **Endpoint**: `/api/v1/project/:projectid/destinations/test` - **Method**: POST @@ -305,15 +342,17 @@ http://localhost:8000 - **Response**: ```json - { - "success": "boolean", - "message": "string", - "data": { - "type": "string", - "version": "string", - "config": "json" - } + { + "success": boolean, + "message": "string", + "data": { + "connection_result": { + "message": "string", + "status": "string" + }, + "logs": "json" } +} ``` ### Create Destination @@ -328,7 +367,7 @@ http://localhost:8000 "name": "string", "type": "string", "config": "json", - "version": "string" + "version":"string", } ``` - **Response**: @@ -341,7 +380,7 @@ http://localhost:8000 "name": "string", "type": "string", "config": "json", - "version": "string" // to create a job same version of destination and same version of source required + "version":"string", // to create a job same version of destination and same version of source required } } ``` @@ -354,7 +393,7 @@ http://localhost:8000 - **Headers**: `Authorization: Bearer ` - **Response**: ```json - { +{ "success": "boolean", "message": "string", "data": [ @@ -395,7 +434,7 @@ http://localhost:8000 "name": "string", "type": "string", "config": "json", - "version": "string" + "version":"string", } ``` - **Response**: @@ -406,7 +445,7 @@ http://localhost:8000 "data": { "name": "string", "type": "string", - "version": "string", + "version":"string", "config": "json" } } @@ -421,14 +460,14 @@ http://localhost:8000 - **Response**: ```json -{ - // NOTE: this is only soft delete not hard - "success": "boolean", - "message": "string", - "data": { - "name": "string" + + { // NOTE: this is only soft delete not hard + "success": "boolean", + "message": "string", + "data": { + "name": "string", + } } -} ``` ## Jobs @@ -454,10 +493,10 @@ http://localhost:8000 "name": "string", "type": "string", "config": "string", - "version": "string" + "version": "string", }, "frequency": "string", - "streams_config": "json" + "streams_config": "json", } ``` @@ -467,14 +506,14 @@ http://localhost:8000 "success": "boolean", "message": "string", "data": { - // request body as it is + // request body as it is } } ``` ### Get All Jobs -- **Endpoint**: `/api/v1/project/:projectid/jobs` // also use endpoint for filter such as /jobs/dest_id="some_id" or /jobs/source_id="some_id" +- **Endpoint**: `/api/v1/project/:projectid/jobs` // also use endpoint for filter such as /jobs/dest_id="some_id" or /jobs/source_id="some_id" - **Method**: GET - **Description**: Retrieve all jobs - **Headers**: `Authorization: Bearer ` @@ -491,23 +530,24 @@ http://localhost:8000 "name": "string", "type": "string", "config": "json", - "version": "string" + "version": "string", }, "destination": { "name": "string", "type": "string", "config": "json", - "version": "string" + "version": "string", }, - "streams_config": "json", + "streams_config":"json", "frequency": "string", "last_run_time": "timestamp", "last_run_state": "string", "created_at": "timestamp", "updated_at": "timestamp", - "created_by": "string", // username - "updated_by": "string" // username - // can also send state but if it is required + "activate": "boolean", + "created_by": "string", // username + "updated_by": "string", // username + // can also send state but if it is required } ] } @@ -528,17 +568,17 @@ http://localhost:8000 "name": 
"string", "type": "string", "config": "json", - "version": "string" + "version": "string", }, "destination": { "name": "string", "type": "string", "config": "json", - "version": "string" + "version": "string", }, "frequency": "string", "streams_config": "json", - "activate": "boolean" // send this to activate or deactivate job + "activate": "boolean", // send this to activate or deactivate job } ``` @@ -553,21 +593,23 @@ http://localhost:8000 "name": "string", "type": "string", "config": "json", - "version": "string" + "version": "string", }, "destination": { "name": "string", "type": "string", "config": "json", - "version": "string" + "version": "string", }, "frequency": "string", "streams_config": "json", - "activate": "boolean" + "activate": "boolean", } } ``` + + ### Delete Job - **Endpoint**: `/api/v1/project/:projectid/jobs/:id` @@ -578,11 +620,11 @@ http://localhost:8000 ```json { - "success": "boolean", - "message": "string", - "data": { - "name": "boolean" - } + "success": "boolean", + "message": "string", + "data": { + "name": "boolean" + } } ``` @@ -673,15 +715,14 @@ http://localhost:8000 "message": "string", "data": [ { - "id": "string", + "id":"string", "start_time": "timestamp", "runtime": "integer", "status": "string" - } + }, ] } ``` - ### cancel Job workflow - **Endpoint**: `/api/v1/project/:projectid/jobs/:jobid/cancel` @@ -701,21 +742,6 @@ http://localhost:8000 } ``` - ### Job Sync - -- **Endpoint**: `/api/v1/project/:projectid/jobs/:id/sync` -- **Method**: POST -- **Description**: Sync the job -- **Headers**: `Authorization: Bearer ` -- **Response**: - - ```json - { - "success": "boolean", - "message": "string", - "data": null - } - ``` ###Activate/Inactivate Job @@ -743,6 +769,7 @@ http://localhost:8000 } ``` + - **Endpoint**: `/api/v1/project/:projectid/jobs/:jobid/task/:id/logs` - **Method**: GET - **Description**: Give the Logs of that particular Job @@ -755,7 +782,7 @@ http://localhost:8000 "success": "boolean", "message": "string", "data": { - "task_logs": "json" + "task_logs":"json" } } ``` diff --git a/docker-compose.yml b/docker-compose.yml index a184df8b..4adf42fd 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -90,7 +90,7 @@ services: restart: "no" temporal-worker: - image: ${CONTAINER_REGISTRY_BASE:-registry-1.docker.io}/olakego/ui-worker:latest + image: ${CONTAINER_REGISTRY_BASE:-registry-1.docker.io}/olakego/ui-worker:stag-latest pull_policy: always container_name: olake-temporal-worker networks: @@ -100,6 +100,7 @@ services: - <<: *workerConfigVolumeDetails environment: <<: *sharedEnvs + OLAKE_CALLBACK_URL: "http://olake-ui:8000/internal/worker/callback" depends_on: temporal: condition: service_started # Or service_healthy if temporal has a healthcheck diff --git a/release.sh b/release.sh index b82ea85a..39cd6400 100755 --- a/release.sh +++ b/release.sh @@ -92,59 +92,6 @@ function release_frontend() { echo "$(chalk green "Frontend release successful for $image_name version $tag_version")" } -function release_worker() { - local version=$1 - local platform=$2 - local environment=$3 # Could be 'dev', 'staging', 'master', etc. 
- local image_name="$DOCKER_REPO_WORKER" # Use a specific repo name for the worker, e.g., yourdockerhubuser/olake-worker - - # Set tag based on environment - local tag_version="" - local latest_tag="" - - case "$environment" in - "master") - tag_version="${version}" - latest_tag="latest" - ;; - "staging") - tag_version="stag-${version}" - latest_tag="stag-latest" - ;; - "dev"|*) # Default to dev prefix if not master or staging - tag_version="dev-${version}" - latest_tag="dev-latest" - ;; - esac - - # # It's good practice to ensure DOCKER_REPO_WORKER is set - # if [ -z "$DOCKER_REPO_WORKER" ]; then - # echo "$(chalk red "Error: DOCKER_REPO_WORKER environment variable is not set.")" - # return 1 # Or use fail "DOCKER_REPO_WORKER not set" if 'fail' is a global helper - # fi - - echo "Logging into Docker (if not already logged in by a previous function call)..." - # Assuming DOCKER_LOGIN and DOCKER_PASSWORD are set globally or passed - # If login is handled globally at the start of the script, this might be redundant - # but doesn't hurt to ensure. - docker login -u="$DOCKER_LOGIN" -p="$DOCKER_PASSWORD" || fail "Docker login failed for $DOCKER_LOGIN" - - echo "**** Releasing worker image $image_name for platforms [$platform] with version [$tag_version] ****" - - echo "Building and pushing worker Docker image..." - - # Assuming worker.Dockerfile is in the project root (context '.') - # If worker.Dockerfile or its context (e.g., server files) are elsewhere, adjust paths. - docker buildx build --platform "$platform" --push \ - -t "${image_name}:${tag_version}" \ - -t "${image_name}:${latest_tag}" \ - --build-arg ENVIRONMENT="$environment" \ - --build-arg APP_VERSION="$version" \ - -f worker.Dockerfile . || fail "Worker build failed. Exiting..." - - echo "$(chalk green "Worker release successful for $image_name version $tag_version")" -} - SEMVER_EXPRESSION='v([0-9]+\.[0-9]+\.[0-9]+)$' STAGING_VERSION_EXPRESSION='v([0-9]+\.[0-9]+\.[0-9]+)-[a-zA-Z0-9_.-]+' @@ -193,6 +140,5 @@ chalk green "=== Release version: $VERSION ===" # Call the frontend-only release function release_frontend "$VERSION" "$platform" "$ENVIRONMENT" -release_worker "$VERSION" "$platform" "$ENVIRONMENT" echo "$(chalk green "✅ Frontend release process completed successfully")" \ No newline at end of file diff --git a/server/README.md b/server/README.md index d781c470..1e67f05e 100644 --- a/server/README.md +++ b/server/README.md @@ -13,7 +13,7 @@ Olake Server is a RESTful API service built with the Beego framework that manage ### 1. Clone the Repository ```bash - git clone https://github.com/datazip-inc/olake-frontend.git + git clone https://github.com/datazip-inc/olake-ui.git ``` ### 2. 
Configure Application Settings (Auth only works when session enabled) diff --git a/server/cmd/temporal-worker/main.go b/server/cmd/temporal-worker/main.go deleted file mode 100644 index f5b8c253..00000000 --- a/server/cmd/temporal-worker/main.go +++ /dev/null @@ -1,68 +0,0 @@ -package main - -import ( - "os" - "os/signal" - "syscall" - - "github.com/beego/beego/v2/core/config" - "github.com/beego/beego/v2/core/logs" - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/database" - "github.com/datazip/olake-frontend/server/internal/docker" - "github.com/datazip/olake-frontend/server/internal/logger" - "github.com/datazip/olake-frontend/server/internal/telemetry" - "github.com/datazip/olake-frontend/server/internal/temporal" - "github.com/datazip/olake-frontend/server/utils" -) - -func main() { - // Initialize telemetry - telemetry.InitTelemetry() - // check constants - constants.Init() - - // init logger - logsdir, _ := config.String("logsdir") - logger.InitLogger(logsdir) - - // init log cleaner - utils.InitLogCleaner(docker.GetDefaultConfigDir(), utils.GetLogRetentionPeriod()) - - // init database - err := database.Init() - if err != nil { - logs.Critical("Failed to initialize database: %s", err) - os.Exit(1) - } - - logs.Info("Starting Olake Temporal worker...") - - // Create a new worker - worker, err := temporal.NewWorker() - if err != nil { - logs.Critical("Failed to create worker: %v", err) - os.Exit(1) - } - - // Start the worker in a goroutine - go func() { - err := worker.Start() - if err != nil { - logs.Critical("Failed to start worker: %v", err) - os.Exit(1) - } - }() - - // Setup signal handling for graceful shutdown - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, os.Interrupt, syscall.SIGTERM) - - // Wait for termination signal - sig := <-signalChan - logs.Info("Received signal %v, shutting down worker...", sig) - - // Stop the worker - worker.Stop() - logs.Info("Worker stopped. 
Goodbye!") -} diff --git a/server/conf/app.conf b/server/conf/app.conf index 812c93ab..e8601038 100644 --- a/server/conf/app.conf +++ b/server/conf/app.conf @@ -3,6 +3,7 @@ httpport = ${HTTP_PORT||8000} runmode = ${RUN_MODE||dev} copyrequestbody = ${COPY_REQUEST_BODY||true} postgresdb = ${POSTGRES_DB} +encryptionkey = ${OLAKE_SECRET_KEY} OLAKE_POSTGRES_USER = ${OLAKE_POSTGRES_USER||temporal} OLAKE_POSTGRES_PASSWORD = ${OLAKE_POSTGRES_PASSWORD||temporal} OLAKE_POSTGRES_HOST = ${OLAKE_POSTGRES_HOST||postgresql} diff --git a/server/go.mod b/server/go.mod index fb68dc28..2be44495 100644 --- a/server/go.mod +++ b/server/go.mod @@ -1,40 +1,111 @@ -module github.com/datazip/olake-frontend/server +module github.com/datazip-inc/olake-ui/server go 1.24.2 require github.com/beego/beego/v2 v2.3.8 require ( - github.com/aws/aws-sdk-go-v2/config v1.31.0 - github.com/aws/aws-sdk-go-v2/service/ecr v1.49.0 + github.com/apache/spark-connect-go/v35 v35.0.0-20250317154112-ffd832059443 + github.com/aws/aws-sdk-go-v2/config v1.29.17 + github.com/aws/aws-sdk-go-v2/service/ecr v1.50.5 github.com/aws/aws-sdk-go-v2/service/kms v1.41.1 + github.com/docker/docker v28.3.3+incompatible + github.com/go-playground/validator/v10 v10.27.0 github.com/lib/pq v1.10.9 github.com/oklog/ulid v1.3.1 + github.com/rs/zerolog v1.34.0 github.com/spf13/viper v1.20.1 + github.com/testcontainers/testcontainers-go v0.39.0 go.temporal.io/sdk v1.34.0 - golang.org/x/crypto v0.35.0 - golang.org/x/mod v0.27.0 + golang.org/x/crypto v0.41.0 + golang.org/x/mod v0.26.0 ) require ( - github.com/aws/aws-sdk-go-v2 v1.38.0 - github.com/aws/aws-sdk-go-v2/credentials v1.18.4 // indirect - github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 // indirect - github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 // indirect + github.com/aws/aws-sdk-go-v2 v1.39.2 + github.com/aws/aws-sdk-go-v2/credentials v1.17.70 // indirect + github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 // indirect + github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 // indirect + github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 // indirect github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 // indirect - github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 // indirect - github.com/aws/aws-sdk-go-v2/service/sso v1.28.0 // indirect - github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 // indirect - github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 // indirect - github.com/aws/smithy-go v1.22.5 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 // indirect + github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 // indirect + github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 // indirect + github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 // indirect + github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 // indirect + github.com/aws/smithy-go v1.23.0 // indirect +) + +require ( + cloud.google.com/go/compute/metadata v0.7.0 // indirect + dario.cat/mergo v1.0.2 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/apache/arrow-go/v18 v18.2.0 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/log v0.1.0 // indirect 
+ github.com/containerd/platforms v0.2.1 // indirect + github.com/cpuguy83/dockercfg v0.3.2 // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/go-connections v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/ebitengine/purego v0.8.4 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/gabriel-vasile/mimetype v1.4.8 // indirect + github.com/go-errors/errors v1.5.1 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-playground/locales v0.14.1 // indirect + github.com/go-playground/universal-translator v0.18.1 // indirect + github.com/goccy/go-json v0.10.5 // indirect + github.com/google/flatbuffers v25.2.10+incompatible // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/klauspost/cpuid/v2 v2.2.10 // indirect + github.com/leodido/go-urn v1.4.0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.10 // indirect + github.com/mattn/go-colorable v0.1.13 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/go-archive v0.1.0 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pierrec/lz4/v4 v4.1.22 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/shirou/gopsutil/v4 v4.25.6 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + github.com/zeebo/xxh3 v1.0.2 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk/metric v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + go.opentelemetry.io/proto/otlp v1.8.0 // indirect + golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect + golang.org/x/oauth2 v0.30.0 // indirect + golang.org/x/tools v0.35.0 // indirect + golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da // indirect ) require ( github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect - github.com/davecgh/go-spew v1.1.1 // indirect + github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a // indirect github.com/fsnotify/fsnotify v1.8.0 // indirect github.com/go-viper/mapstructure/v2 v2.3.0 // indirect @@ -42,38 +113,39 @@ require ( github.com/golang/mock v1.6.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect + 
github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect + github.com/jmoiron/sqlx v1.4.0 github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/nexus-rpc/sdk-go v0.3.0 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect - github.com/pmezard/go-difflib v1.0.0 // indirect + github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.19.0 // indirect github.com/prometheus/client_model v0.5.0 // indirect github.com/prometheus/common v0.48.0 // indirect github.com/prometheus/procfs v0.12.0 // indirect - github.com/robfig/cron v1.2.0 + github.com/robfig/cron v1.2.0 // indirect github.com/sagikazarmark/locafero v0.7.0 // indirect github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 // indirect github.com/sourcegraph/conc v0.3.0 // indirect - github.com/spf13/afero v1.12.0 // indirect + github.com/spf13/afero v1.14.0 // indirect github.com/spf13/cast v1.7.1 // indirect github.com/spf13/pflag v1.0.6 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/stretchr/testify v1.10.0 // indirect + github.com/stretchr/testify v1.11.1 github.com/subosito/gotenv v1.6.0 // indirect github.com/valyala/bytebufferpool v1.0.0 // indirect go.temporal.io/api v1.46.0 go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.9.0 // indirect - golang.org/x/net v0.36.0 // indirect - golang.org/x/sync v0.11.0 // indirect - golang.org/x/sys v0.30.0 // indirect - golang.org/x/text v0.22.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sync v0.16.0 // indirect + golang.org/x/sys v0.36.0 // indirect + golang.org/x/text v0.28.0 // indirect golang.org/x/time v0.8.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 // indirect - google.golang.org/grpc v1.67.3 // indirect - google.golang.org/protobuf v1.36.5 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect + google.golang.org/grpc v1.75.0 // indirect + google.golang.org/protobuf v1.36.8 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/server/go.sum b/server/go.sum index 1e0c17a7..4110f958 100644 --- a/server/go.sum +++ b/server/go.sum @@ -1,50 +1,94 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU= +cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= 
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/aws/aws-sdk-go-v2 v1.38.0 h1:UCRQ5mlqcFk9HJDIqENSLR3wiG1VTWlyUfLDEvY7RxU= -github.com/aws/aws-sdk-go-v2 v1.38.0/go.mod h1:9Q0OoGQoboYIAJyslFyF1f5K1Ryddop8gqMhWx/n4Wg= -github.com/aws/aws-sdk-go-v2/config v1.31.0 h1:9yH0xiY5fUnVNLRWO0AtayqwU1ndriZdN78LlhruJR4= -github.com/aws/aws-sdk-go-v2/config v1.31.0/go.mod h1:VeV3K72nXnhbe4EuxxhzsDc/ByrCSlZwUnWH52Nde/I= -github.com/aws/aws-sdk-go-v2/credentials v1.18.4 h1:IPd0Algf1b+Qy9BcDp0sCUcIWdCQPSzDoMK3a8pcbUM= -github.com/aws/aws-sdk-go-v2/credentials v1.18.4/go.mod h1:nwg78FjH2qvsRM1EVZlX9WuGUJOL5od+0qvm0adEzHk= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3 h1:GicIdnekoJsjq9wqnvyi2elW6CGMSYKhdozE7/Svh78= -github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.18.3/go.mod h1:R7BIi6WNC5mc1kfRM7XM/VHC3uRWkjc396sfabq4iOo= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3 h1:o9RnO+YZ4X+kt5Z7Nvcishlz0nksIt2PIzDglLMP0vA= -github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.3/go.mod h1:+6aLJzOG1fvMOyzIySYjOFjcguGvVRL68R+uoRencN4= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3 h1:joyyUFhiTQQmVK6ImzNU9TQSNRNeD9kOklqTzyk5v6s= -github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.3/go.mod h1:+vNIyZQP3b3B1tSLI0lxvrU9cfM7gpdRXMFfm67ZcPc= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/andybalholm/brotli v1.1.1 h1:PR2pgnyFznKEugtsUo0xLdDop5SKXd5Qf5ysW+7XdTA= +github.com/andybalholm/brotli v1.1.1/go.mod h1:05ib4cKhjx3OQYUY22hTVd34Bc8upXjOLL2rKwwZBoA= +github.com/apache/arrow-go/v18 v18.2.0 h1:QhWqpgZMKfWOniGPhbUxrHohWnooGURqL2R2Gg4SO1Q= +github.com/apache/arrow-go/v18 v18.2.0/go.mod h1:Ic/01WSwGJWRrdAZcxjBZ5hbApNJ28K96jGYaxzzGUc= +github.com/apache/spark-connect-go/v35 v35.0.0-20250317154112-ffd832059443 h1:pA4aHBVygvcQZuXVSJg2kH3z0rZO3M/YJUyUuPX82ko= +github.com/apache/spark-connect-go/v35 v35.0.0-20250317154112-ffd832059443/go.mod h1:ODlxb8YN0y/JyS7h+vhz+afnQ+beSkYTqDHYtg2T6E8= +github.com/apache/thrift v0.21.0 h1:tdPmh/ptjE1IJnhbhrcl2++TauVjy242rkV/UzJChnE= +github.com/apache/thrift v0.21.0/go.mod h1:W1H8aR/QRtYNvrPeFXBtobyRkd0/YVhTc6i07XIAgDw= +github.com/aws/aws-sdk-go-v2 v1.39.2 h1:EJLg8IdbzgeD7xgvZ+I8M1e0fL0ptn/M47lianzth0I= +github.com/aws/aws-sdk-go-v2 v1.39.2/go.mod h1:sDioUELIUO9Znk23YVmIk86/9DOpkbyyVb1i/gUNFXY= +github.com/aws/aws-sdk-go-v2/config v1.29.17 h1:jSuiQ5jEe4SAMH6lLRMY9OVC+TqJLP5655pBGjmnjr0= +github.com/aws/aws-sdk-go-v2/config v1.29.17/go.mod h1:9P4wwACpbeXs9Pm9w1QTh6BwWwJjwYvJ1iCt5QbCXh8= +github.com/aws/aws-sdk-go-v2/credentials v1.17.70 h1:ONnH5CM16RTXRkS8Z1qg7/s2eDOhHhaXVd72mmyv4/0= +github.com/aws/aws-sdk-go-v2/credentials v1.17.70/go.mod h1:M+lWhhmomVGgtuPOhO85u4pEa3SmssPTdcYpP/5J/xc= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32 h1:KAXP9JSHO1vKGCr5f4O6WmlVKLFFXgWYAGoJosorxzU= +github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.32/go.mod h1:h4Sg6FQdexC1yYG9RDnOvLbW1a/P986++/Y/a+GyEM8= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9 h1:se2vOWGD3dWQUtfn4wEjRQJb1HK1XsNIt825gskZ970= +github.com/aws/aws-sdk-go-v2/internal/configsources v1.4.9/go.mod h1:hijCGH2VfbZQxqCDN7bwz/4dzxV+hkyhjawAtdPWKZA= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9 h1:6RBnKZLkJM4hQ+kN6E7yWFveOTg8NLPHAkqrs4ZPlTU= +github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.7.9/go.mod h1:V9rQKRmK7AWuEsOMnHzKj8WyrIir1yUJbZxDuZLFvXI= 
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3 h1:bIqFDwgGXXN1Kpp99pDOdKMTTb5d2KyU5X/BZxjOkRo= github.com/aws/aws-sdk-go-v2/internal/ini v1.8.3/go.mod h1:H5O/EsxDWyU+LP/V8i5sm8cxoZgc2fdNR9bxlOFrQTo= -github.com/aws/aws-sdk-go-v2/service/ecr v1.49.0 h1:NgkSYzgM3UhdSrXUKkl49FhbIPpNguZE4EXEGRhDcEU= -github.com/aws/aws-sdk-go-v2/service/ecr v1.49.0/go.mod h1:bi1dAg6vk8KC8nyf6DjQ3dkNJbzTirMSmZHbcRNa2vE= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0 h1:6+lZi2JeGKtCraAj1rpoZfKqnQ9SptseRZioejfUOLM= -github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.13.0/go.mod h1:eb3gfbVIxIoGgJsi9pGne19dhCBpK6opTYpQqAmdy44= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3 h1:ieRzyHXypu5ByllM7Sp4hC5f/1Fy5wqxqY0yB85hC7s= -github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.13.3/go.mod h1:O5ROz8jHiOAKAwx179v+7sHMhfobFVi6nZt8DEyiYoM= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.5 h1:jzjNyiIrXJHumV1hwofcQLpIZtcDw+vPQL00rLI3s4g= +github.com/aws/aws-sdk-go-v2/service/ecr v1.50.5/go.mod h1:UtPKcYVHY6RrV9EaaM1KZGNaf9dgviFdsT6xoFMLQsM= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4 h1:CXV68E2dNqhuynZJPB80bhPQwAKqBWVer887figW6Jc= +github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.12.4/go.mod h1:/xFi9KtvBXP97ppCz1TAEvU1Uf66qvid89rbem3wCzQ= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17 h1:t0E6FzREdtCsiLIoLCWsYliNsRBgyGD/MCK571qk4MI= +github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.12.17/go.mod h1:ygpklyoaypuyDvOM5ujWGrYWpAK3h7ugnmKCU/76Ys4= github.com/aws/aws-sdk-go-v2/service/kms v1.41.1 h1:dkaX98cOXw4EgqpDXPqrVVLjsPR9T24wA2TcjrQiank= github.com/aws/aws-sdk-go-v2/service/kms v1.41.1/go.mod h1:Pqd9k4TuespkireN206cK2QBsaBTL6X+VPAez5Qcijk= -github.com/aws/aws-sdk-go-v2/service/sso v1.28.0 h1:Mc/MKBf2m4VynyJkABoVEN+QzkfLqGj0aiJuEe7cMeM= -github.com/aws/aws-sdk-go-v2/service/sso v1.28.0/go.mod h1:iS5OmxEcN4QIPXARGhavH7S8kETNL11kym6jhoS7IUQ= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0 h1:6csaS/aJmqZQbKhi1EyEMM7yBW653Wy/B9hnBofW+sw= -github.com/aws/aws-sdk-go-v2/service/ssooidc v1.33.0/go.mod h1:59qHWaY5B+Rs7HGTuVGaC32m0rdpQ68N8QCN3khYiqs= -github.com/aws/aws-sdk-go-v2/service/sts v1.37.0 h1:MG9VFW43M4A8BYeAfaJJZWrroinxeTi2r3+SnmLQfSA= -github.com/aws/aws-sdk-go-v2/service/sts v1.37.0/go.mod h1:JdeBDPgpJfuS6rU/hNglmOigKhyEZtBmbraLE4GK1J8= -github.com/aws/smithy-go v1.22.5 h1:P9ATCXPMb2mPjYBgueqJNCA5S9UfktsW0tTxi+a7eqw= -github.com/aws/smithy-go v1.22.5/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.5 h1:AIRJ3lfb2w/1/8wOOSqYb9fUKGwQbtysJ2H1MofRUPg= +github.com/aws/aws-sdk-go-v2/service/sso v1.25.5/go.mod h1:b7SiVprpU+iGazDUqvRSLf5XmCdn+JtT1on7uNL6Ipc= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3 h1:BpOxT3yhLwSJ77qIY3DoHAQjZsc4HEGfMCE4NGy3uFg= +github.com/aws/aws-sdk-go-v2/service/ssooidc v1.30.3/go.mod h1:vq/GQR1gOFLquZMSrxUK/cpvKCNVYibNyJ1m7JrU88E= +github.com/aws/aws-sdk-go-v2/service/sts v1.34.0 h1:NFOJ/NXEGV4Rq//71Hs1jC/NvPs1ezajK+yQmkwnPV0= +github.com/aws/aws-sdk-go-v2/service/sts v1.34.0/go.mod h1:7ph2tGpfQvwzgistp2+zga9f+bCjlQJPkPUmMgDSD7w= +github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE= +github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI= github.com/beego/beego/v2 v2.3.8 h1:wplhB1pF4TxR+2SS4PUej8eDoH4xGfxuHfS7wAk9VBc= github.com/beego/beego/v2 v2.3.8/go.mod h1:8vl9+RrXqvodrl9C8yivX1e6le6deCK6RWeq8R7gTTg= 
github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/coreos/go-systemd/v22 v22.5.0/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM= +github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/docker v28.3.3+incompatible h1:Dypm25kh4rmk49v1eiVbsAtpAsYURjYkaKubwuBdxEI= +github.com/docker/docker v28.3.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/ebitengine/purego v0.8.4 h1:CF7LEKg5FFOsASUj0+QwaXf8Ht6TlFxg09+S9wz0omw= +github.com/ebitengine/purego v0.8.4/go.mod h1:iIjxzd6CiRiOG0UyXP+V1+jWqUXVjPKLAI0mRfJZTmQ= github.com/elazarl/go-bindata-assetfs v1.0.1 
h1:m0kkaHRKEu7tUIUFVwhGGGYClXvyl4RE03qmvRTNfbw= github.com/elazarl/go-bindata-assetfs v1.0.1/go.mod h1:v+YaWX3bdea5J/mo8dSETolEo7R71Vk1u8bnjau5yw4= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -53,17 +97,41 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a h1:yDWHCSQ40h88yih2JAcL6Ls/kVkSE8GFACTGVnMPruw= github.com/facebookgo/clock v0.0.0-20150410010913-600d898af40a/go.mod h1:7Ga40egUymuWXxAe151lTNnCv97MddSOVsjpPPkityA= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/frankban/quicktest v1.14.6 h1:7Xjx+VpznH+oBnejlPUj8oUpdxnVs4f8XU8WnHkI4W8= github.com/frankban/quicktest v1.14.6/go.mod h1:4ptaffx2x8+WTWXmUCuVU6aPUX1/Mz7zb5vbUoiM6w0= github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM= +github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8= +github.com/go-errors/errors v1.5.1 h1:ZwEMSLRCapFLflTpT7NKaAc7ukJ8ZPEjzlxt8rPN8bk= +github.com/go-errors/errors v1.5.1/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s= +github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA= +github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY= +github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY= +github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY= +github.com/go-playground/validator/v10 v10.27.0 h1:w8+XrWVMhGkxOaaowyKH35gFydVHOvC0/uWoy2Fzwn4= +github.com/go-playground/validator/v10 v10.27.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo= github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-viper/mapstructure/v2 v2.3.0 h1:27XbWsHIqhbdR5TIC911OfYvgSaW93HM+dX7970Q7jk= github.com/go-viper/mapstructure/v2 v2.3.0/go.mod 
h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM= +github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4= +github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M= +github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -75,19 +143,32 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.4 h1:yAGX7huGHXlcLOEtBnF4w7FQwA26wojNCwOYAEhLjQM= +github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/flatbuffers v25.2.10+incompatible h1:F3vclr7C3HpB1k9mxCGRMXq6FdUalZ6H/pNX4FP1v0Q= +github.com/google/flatbuffers v25.2.10+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 h1:UH//fgunKIs4JdUbpDl1VZCDaL56wXCB/5+wF6uHfaI= github.com/grpc-ecosystem/go-grpc-middleware v1.4.0/go.mod h1:g5qyo/la0ALbONm6Vbp88Yd8NsDy6rZz+RcrMPxvld8= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2 h1:8Tjv8EJ+pM1xP8mK6egEbD1OgnVTyacbefKhmbLhIhU= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.2/go.mod h1:pkJQ2tZHJ0aFOVEEot6oZmaVEZcRme73eIFmhiVuRWs= github.com/hashicorp/golang-lru v0.5.4 h1:YDjusn29QI/Das2iO9M0BHnIbxPeyuCHsjMW+lJfyTc= github.com/hashicorp/golang-lru v0.5.4/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4= +github.com/jmoiron/sqlx v1.4.0 h1:1PLqN7S1UYp5t4SrVVnt4nUVNemrDAtxlulVe+Qgm3o= +github.com/jmoiron/sqlx v1.4.0/go.mod h1:ZrZ7UsYB/weZdl2Bxg6jCRO9c3YHl8r3ahlKmRT4JLY= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/klauspost/asmfmt v1.3.2 h1:4Ri7ox3EwapiOjCki+hw14RyKk201CN4rzyCJRFLpK4= +github.com/klauspost/asmfmt v1.3.2/go.mod h1:AG8TuvYojzulgDAMCnYn50l/5QV3Bs/tp6j0HLHbNSE= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/klauspost/cpuid/v2 v2.2.10 
h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE= +github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= @@ -96,22 +177,68 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/leodido/go-urn v1.4.0 h1:WT9HwE9SGECu3lg4d/dIA+jxlljEa1/ffXKmRjqdmIQ= +github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjSBOlFxI= github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= +github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= +github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= +github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-sqlite3 v1.14.22/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= github.com/mattn/go-sqlite3 v1.14.27 h1:drZCnuvf37yPfs95E5jd9s3XhdVWLal+6BOK6qrv6IU= github.com/mattn/go-sqlite3 v1.14.27/go.mod h1:Uh1q+B4BYcTPb+yiD3kU8Ct7aC0hY9fxUwlHK0RXw+Y= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8 h1:AMFGa4R4MiIpspGNG7Z948v4n35fFGB3RR3G/ry4FWs= +github.com/minio/asm2plan9s v0.0.0-20200509001527-cdd76441f9d8/go.mod h1:mC1jAcsrzbxHt8iiaC+zU4b1ylILSosueou12R++wfY= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3 h1:+n/aFZefKZp7spd8DFdX7uMikMLXX4oubIzJF4kv/wI= +github.com/minio/c2goasm v0.0.0-20190812172519-36a3d3bbc4f3/go.mod h1:RagcQ7I8IeTMnF8JTXieKnO4Z6JCsikNEzj0DwauVzE= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ= +github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/atomicwriter v0.1.0 
h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= github.com/nexus-rpc/sdk-go v0.3.0 h1:Y3B0kLYbMhd4C2u00kcYajvmOrfozEtTV/nHSnV57jA= github.com/nexus-rpc/sdk-go v0.3.0/go.mod h1:TpfkM2Cw0Rlk9drGkoiSMpFqflKTiQLWUNyKJjF8mKQ= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= +github.com/pierrec/lz4/v4 v4.1.22 h1:cKFw6uJDK+/gfw5BcDL0JL5aBsAFdsIT18eRtLj7VIU= +github.com/pierrec/lz4/v4 v4.1.22/go.mod h1:gZWDp/Ze/IJXGXf23ltt2EXimqmTUXEy0GFuRQyBid4= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U= +github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/prometheus/client_golang v1.19.0 h1:ygXvpU1AoN1MhdzckN+PyD9QJOSD4x7kmXYlnfbA6JU= github.com/prometheus/client_golang v1.19.0/go.mod h1:ZRM9uEAypZakd+q/x7+gmsvXdURP+DABIEIjnmDdp+k= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -123,17 +250,24 @@ github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k github.com/prometheus/procfs v0.12.0/go.mod 
h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo= github.com/robfig/cron v1.2.0 h1:ZjScXvvxeQ63Dbyxy76Fj3AT3Ut0aKsyd2/tl3DTMuQ= github.com/robfig/cron v1.2.0/go.mod h1:JGuDeoQd7Z6yL4zQhZ3OPEVHB7fL6Ka6skscFHfmt2k= -github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/xid v1.6.0/go.mod h1:7XoLgs4eV+QndskICGsho+ADou8ySMSjJKDIan90Nz0= +github.com/rs/zerolog v1.34.0 h1:k43nTLIwcTVQAncfCw4KZ2VY6ukYoZaBPNOE8txlOeY= +github.com/rs/zerolog v1.34.0/go.mod h1:bJsvje4Z08ROH4Nhs5iH600c3IkWhwp44iRc54W6wYQ= github.com/sagikazarmark/locafero v0.7.0 h1:5MqpDsTGNDhY8sGp0Aowyf0qKsPrhewaLSsFaodPcyo= github.com/sagikazarmark/locafero v0.7.0/go.mod h1:2za3Cg5rMaTMoG/2Ulr9AwtFaIppKXTRYnozin4aB5k= github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18 h1:DAYUYH5869yV94zvCES9F51oYtN5oGlwjxJJz7ZCnik= github.com/shiena/ansicolor v0.0.0-20200904210342-c7312218db18/go.mod h1:nkxAfR/5quYxwPZhyDxgasBMnRtBZd0FCEpawpjMUFg= +github.com/shirou/gopsutil/v4 v4.25.6 h1:kLysI2JsKorfaFPcYmcJqbzROzsBWEOAtw6A7dIfqXs= +github.com/shirou/gopsutil/v4 v4.25.6/go.mod h1:PfybzyydfZcN+JMMjkF6Zb8Mq1A/VcogFFg7hj50W9c= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= github.com/sourcegraph/conc v0.3.0 h1:OQTbbt6P72L20UqAkXXuLOj79LfEanQ+YQFNpLA9ySo= github.com/sourcegraph/conc v0.3.0/go.mod h1:Sdozi7LEKbFPqYX2/J+iBAM6HpqSLTASQIKqDmF7Mt0= -github.com/spf13/afero v1.12.0 h1:UcOPyRBYczmFn6yvphxkn9ZEOY65cpwGKb5mL36mrqs= -github.com/spf13/afero v1.12.0/go.mod h1:ZTlWwG4/ahT8W7T0WQ5uYmjI9duaLQGy3Q2OAl4sk/4= +github.com/spf13/afero v1.14.0 h1:9tH6MapGnn/j0eb0yIXiLjERO8RB6xIVZRDCX7PtqWA= +github.com/spf13/afero v1.14.0/go.mod h1:acJQ8t0ohCGuMN3O+Pv0V0hgMxNYDlvdk+VTfyZmbYo= github.com/spf13/cast v1.7.1 h1:cuNEagBQEHWN1FnbGEjCXL2szYEXqfJPbP2HNUaca9Y= github.com/spf13/cast v1.7.1/go.mod h1:ancEpBxwJDODSW/UG4rDrAqiKolqNNh2DX3mk86cAdo= github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o= @@ -148,15 +282,47 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= -github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= github.com/subosito/gotenv v1.6.0 h1:9NlTDc1FTs4qu0DDq7AEtTPNw6SVm7uBMsUCUjABIf8= github.com/subosito/gotenv v1.6.0/go.mod h1:Dk4QP5c2W3ibzajGcXpNraDfq2IrhjMIvMSWPKKo0FU= +github.com/testcontainers/testcontainers-go v0.39.0 h1:uCUJ5tA+fcxbFAB0uP3pIK3EJ2IjjDUHFSZ1H1UxAts= +github.com/testcontainers/testcontainers-go v0.39.0/go.mod h1:qmHpkG7H5uPf/EvOORKvS6EuDkBUPE3zpVGaH9NL7f8= 
+github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/valyala/bytebufferpool v1.0.0 h1:GqA5TC/0021Y/b9FG4Oi9Mr3q7XYx6KllzawFIhcdPw= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zeebo/assert v1.3.0 h1:g7C04CbJuIDKNPFHmsk4hwZDO5O+kntRxzaUoNXj+IQ= +github.com/zeebo/assert v1.3.0/go.mod h1:Pq9JiuJQpG8JLJdtkwrJESF0Foym2/D9XMU5ciN/wJ0= +github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= +github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/sdk/metric v1.38.0 h1:aSH66iL0aZqo//xXzQLYozmWrXxyFkBJ6qT5wthqPoM= +go.opentelemetry.io/otel/sdk/metric v1.38.0/go.mod h1:dg9PBnW9XdQ1Hd6ZnRz689CbtrUp0wMMs9iPcgT9EZA= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.8.0 h1:fRAZQDcAFHySxpJ1TwlA1cJ4tvcrw7nXl9xWWC8N5CE= +go.opentelemetry.io/proto/otlp v1.8.0/go.mod h1:tIeYOeNBU4cvmPqpaji1P+KbB4Oloai8wN4rWzRrFF0= go.temporal.io/api v1.46.0 h1:O1efPDB6O2B8uIeCDIa+3VZC7tZMvYsMZYQapSbHvCg= go.temporal.io/api v1.46.0/go.mod h1:iaxoP/9OXMJcQkETTECfwYq4cw/bj4nwov8b3ZLVnXM= go.temporal.io/sdk v1.34.0 h1:VLg/h6ny7GvLFVoQPqz2NcC93V9yXboQwblkRvZ1cZE= @@ -172,9 +338,11 @@ 
go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.35.0 h1:b15kiHdrGCHrP6LvwaQ3c03kgNhhiMgvlhxHQhmg2Xs= -golang.org/x/crypto v0.35.0/go.mod h1:dy7dXNW32cAb/6/PRuTNsix8T+vJAqvuIy5Bli/x0YQ= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk= +golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -182,8 +350,8 @@ golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHl golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.27.0 h1:kb+q2PyFnEADO2IEF935ehFUXlWiNjJWtRNgBLSfbxQ= -golang.org/x/mod v0.27.0/go.mod h1:rWI627Fq0DEoudcK+MBkNkCe0EetEaDSwJJkCcjpazc= +golang.org/x/mod v0.26.0 h1:EGMPT//Ezu+ylkCijjPc+f4Aih7sZvaAr+O3EHBxvZg= +golang.org/x/mod v0.26.0/go.mod h1:/j6NAhSk8iQ723BGAUyoAcn7SlD7s15Dp9Nd/SfeaFQ= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -193,33 +361,46 @@ golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.36.0 h1:vWF2fRbw4qslQsQzgFqZff+BItCvGFQqKzKIzx1rmoA= -golang.org/x/net v0.36.0/go.mod h1:bFmbeoIPfrw4sMHNhb4J9f6+tPziuGjq7Jk/38fxi1I= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI= +golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w= -golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw= +golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20211025201205-69cdffdb9359/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc= -golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM= -golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY= +golang.org/x/text v0.28.0 
h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng= +golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU= golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -232,28 +413,34 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= +golang.org/x/tools v0.35.0 h1:mBffYraMEf7aa0sB+NuKnuCy8qI/9Bughn8dC2Gu5r0= +golang.org/x/tools v0.35.0/go.mod h1:NKdj5HkL/73byiZSJjqJgKn3ep7KjFkBOkR/Hps3VPw= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da h1:noIWHXmPHxILtqtCOPIhSt0ABwskkZKjD3bXGnZGpNY= +golang.org/x/xerrors v0.0.0-20240903120638-7835f813f4da/go.mod h1:NDW/Ps6MPRej6fsCIbMTohpP40sJ/P/vI1MoTEGwX90= +gonum.org/v1/gonum v0.16.0 h1:5+ul4Swaf3ESvrOnidPp4GZbzf0mxVQpDCYUQE7OJfk= +gonum.org/v1/gonum v0.16.0/go.mod h1:fef3am4MQ93R2HHpKnLk4/Tbh/s0+wqD5nfa6Pnwy4E= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q= -google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8 h1:TqExAhdPaB60Ux47Cn0oLV07rGnxZzIsaRhQaqS666A= -google.golang.org/genproto/googleapis/rpc v0.0.0-20241223144023-3abc09e42ca8/go.mod h1:lcTa1sDdWEIHMWlITnIczmw5w60CF9ffkb8Z+DVmmjA= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 h1:BIRfGDEjiHRrk0QKZe3Xv2ieMhtgRGeLcZQ0mIVn4EY= +google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5/go.mod h1:j3QtIyytwqGr1JUDtYXwtMXWPKsEa5LtzIFN1Wn5WvE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 h1:eaY8u2EuxbRv7c3NiGK0/NedzVsCcV6hDuU5qPX5EGE= +google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5/go.mod h1:M4/wBTSeyLxupu3W3tJtOgB14jILAS/XWPSSa3TAlJc= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod 
h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= -google.golang.org/grpc v1.67.3 h1:OgPcDAFKHnH8X3O4WcO4XUc8GRDeKsKReqbQtiCj7N8= -google.golang.org/grpc v1.67.3/go.mod h1:YGaHCc6Oap+FzBJTZLBzkGSYt/cvGPFTPxkn7QfSU8s= -google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= -google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/grpc v1.75.0 h1:+TW+dqTd2Biwe6KKfhE5JpiYIBWq865PhKGSXiivqt4= +google.golang.org/grpc v1.75.0/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v1.36.8 h1:xHScyCOEuuwZEc6UtSOvPbAT4zRh0xcNRYekJwfqyMc= +google.golang.org/protobuf v1.36.8/go.mod h1:fuxRtAxBytpl4zzqUh6/eyUujkJdNiuEkXntxiD/uRU= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= @@ -264,5 +451,7 @@ gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/server/internal/constants/constants.go b/server/internal/constants/constants.go index b8ef8738..99f557fe 100644 --- a/server/internal/constants/constants.go +++ b/server/internal/constants/constants.go @@ -23,8 +23,44 @@ var ( DefaultConfigDir = "/tmp/olake-config" DefaultLogRetentionPeriod = 30 DefaultSpecVersion = "v0.2.0" + // logging + EnvLogLevel = "LOG_LEVEL" + EnvLogFormat = "LOG_FORMAT" + OrderByUpdatedAtDesc = "-updated_at" + // Frontend index path key + FrontendIndexPath = "FRONTEND_INDEX_PATH" + TemporalTaskQueue = "OLAKE_DOCKER_TASK_QUEUE" + + // conf keys + ConfEncryptionKey = "encryptionkey" + ConfTemporalAddress = "TEMPORAL_ADDRESS" + ConfDeploymentMode = "DEPLOYMENT_MODE" + ConfRunMode = "runmode" + ConfContainerRegistryBase = "CONTAINER_REGISTRY_BASE" + // database keys + ConfPostgresDB = "postgresdb" + ConfOLakePostgresUser = "OLAKE_POSTGRES_USER" + ConfOLakePostgresPassword = "OLAKE_POSTGRES_PASSWORD" + ConfOLakePostgresHost = "OLAKE_POSTGRES_HOST" + ConfOLakePostgresPort = "OLAKE_POSTGRES_PORT" + ConfOLakePostgresDBname = "OLAKE_POSTGRES_DBNAME" + ConfOLakePostgresSslmode = "OLAKE_POSTGRES_SSLMODE" ) +// Supported database/source types +var SupportedSourceTypes = []string{ + "mysql", + "postgres", + "oracle", + "mongodb", +} + +// Supported database/source types +var SupportedDestinationTypes = []string{ + "parquet", + "iceberg", +} + var RequiredConfigVariable = []string{ "OLAKE_POSTGRES_USER", "OLAKE_POSTGRES_PASSWORD", @@ -37,13 +73,15 @@ var RequiredConfigVariable = []string{ func Init() { viper.AutomaticEnv() - + 
viper.SetDefault(EnvLogFormat, "console") + viper.SetDefault(EnvLogLevel, "info") viper.SetDefault("PORT", defaultPort) viper.SetDefault("BUILD", version) viper.SetDefault("COMMITSHA", commitsha) viper.SetDefault("RELEASE_CHANNEL", releasechannel) viper.SetDefault("BASE_HOST", defaultBaseHost) viper.SetDefault("BASE_URL", fmt.Sprintf("%s:%v", viper.GetString("BASE_HOST"), viper.GetString("PORT"))) + viper.SetDefault(FrontendIndexPath, "/opt/frontend/dist/index.html") checkForRequiredVariables(RequiredConfigVariable) diff --git a/server/internal/constants/messages.go b/server/internal/constants/messages.go new file mode 100644 index 00000000..ec9245fc --- /dev/null +++ b/server/internal/constants/messages.go @@ -0,0 +1,20 @@ +package constants + +import "errors" + +// Common error messages +var ( + // User related errors + ErrUserNotFound = errors.New("user not found") + ErrInvalidCredentials = errors.New("invalid credentials") + ErrUserAlreadyExists = errors.New("user already exists") + ErrPasswordProcessing = errors.New("failed to process password") + + // Source related errors + ErrSourceNotFound = errors.New("source not found") +) + +// Validation messages +const ( + ValidationInvalidRequestFormat = "Invalid request format" +) diff --git a/server/internal/database/postgres.go b/server/internal/database/database.go similarity index 64% rename from server/internal/database/postgres.go rename to server/internal/database/database.go index cda24135..c6f1c939 100644 --- a/server/internal/database/postgres.go +++ b/server/internal/database/database.go @@ -6,30 +6,35 @@ import ( "net/url" "github.com/beego/beego/v2/client/orm" - "github.com/beego/beego/v2/core/logs" "github.com/beego/beego/v2/server/web" _ "github.com/beego/beego/v2/server/web/session/postgres" // required for session _ "github.com/lib/pq" // required for registering driver - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/models" + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/utils/logger" ) -func Init() error { +type Database struct { + ormer orm.Ormer +} + +func Init() (*Database, error) { // register driver uri, err := BuildPostgresURIFromConfig() if err != nil { - return fmt.Errorf("failed to build postgres uri: %s", err) + return nil, fmt.Errorf("failed to build postgres uri: %s", err) } + err = orm.RegisterDriver("postgres", orm.DRPostgres) if err != nil { - return fmt.Errorf("failed to register postgres driver: %s", err) + return nil, fmt.Errorf("failed to register postgres driver: %s", err) } // register database err = orm.RegisterDataBase("default", "postgres", uri) if err != nil { - return fmt.Errorf("failed to register postgres database: %s", err) + return nil, fmt.Errorf("failed to register postgres database: %s", err) } // enable session by default @@ -54,8 +59,9 @@ func Init() error { // Create tables if they do not exist err = orm.RunSyncdb("default", false, true) if err != nil { - return fmt.Errorf("failed to sync database schema: %s", err) + return nil, fmt.Errorf("failed to sync database schema: %s", err) } + // Add session table if sessions are enabled if web.BConfig.WebConfig.Session.SessionOn { _, err = orm.NewOrm().Raw(`CREATE TABLE IF NOT EXISTS session ( @@ -65,28 +71,28 @@ func Init() error { );`).Exec() if err != nil { - return fmt.Errorf("failed to create session table: %s", err) + return nil, fmt.Errorf("failed to 
create session table: %s", err) } } - return nil + return &Database{ormer: orm.NewOrm()}, nil } // BuildPostgresURIFromConfig reads POSTGRES_DB_HOST, POSTGRES_DB_PORT, etc. from app.conf // and constructs the Postgres connection URI. func BuildPostgresURIFromConfig() (string, error) { - logs.Info("Building Postgres URI from config") + logger.Info("Building Postgres URI from config") // First, check if postgresdb is set directly - if dsn, err := web.AppConfig.String("postgresdb"); err == nil && dsn != "" { + if dsn, err := web.AppConfig.String(constants.ConfPostgresDB); err == nil && dsn != "" { return dsn, nil } - user, _ := web.AppConfig.String("OLAKE_POSTGRES_USER") - password, _ := web.AppConfig.String("OLAKE_POSTGRES_PASSWORD") - host, _ := web.AppConfig.String("OLAKE_POSTGRES_HOST") - port, _ := web.AppConfig.String("OLAKE_POSTGRES_PORT") - dbName, _ := web.AppConfig.String("OLAKE_POSTGRES_DBNAME") - sslMode, _ := web.AppConfig.String("OLAKE_POSTGRES_SSLMODE") + user, _ := web.AppConfig.String(constants.ConfOLakePostgresUser) + password, _ := web.AppConfig.String(constants.ConfOLakePostgresPassword) + host, _ := web.AppConfig.String(constants.ConfOLakePostgresHost) + port, _ := web.AppConfig.String(constants.ConfOLakePostgresPort) + dbName, _ := web.AppConfig.String(constants.ConfOLakePostgresDBname) + sslMode, _ := web.AppConfig.String(constants.ConfOLakePostgresSslmode) u := &url.URL{ Scheme: "postgres", diff --git a/server/internal/database/destination.go b/server/internal/database/destination.go index fc6a509a..0316ce87 100644 --- a/server/internal/database/destination.go +++ b/server/internal/database/destination.go @@ -2,132 +2,96 @@ package database import ( "fmt" - "time" - "github.com/beego/beego/v2/client/orm" - - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/models" - "github.com/datazip/olake-frontend/server/utils" + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/utils" ) -// DestinationORM handles database operations for destinations -type DestinationORM struct { - ormer orm.Ormer - TableName string -} - -func NewDestinationORM() *DestinationORM { - return &DestinationORM{ - ormer: orm.NewOrm(), - TableName: constants.TableNameMap[constants.DestinationTable], - } -} - // decryptDestinationSliceConfigs decrypts config fields for a slice of destinations -func (r *DestinationORM) decryptDestinationSliceConfigs(destinations []*models.Destination) error { +func (db *Database) decryptDestinationSliceConfigs(destinations []*models.Destination) error { for _, dest := range destinations { dConfig, err := utils.Decrypt(dest.Config) if err != nil { - return fmt.Errorf("failed to decrypt destination config: %s", err) + return fmt.Errorf("failed to decrypt destination config id[%d]: %s", dest.ID, err) } dest.Config = dConfig } return nil } -func (r *DestinationORM) Create(destination *models.Destination) error { +func (db *Database) CreateDestination(destination *models.Destination) error { // Encrypt config before saving eConfig, err := utils.Encrypt(destination.Config) if err != nil { - return fmt.Errorf("failed to encrypt destination config: %s", err) + return fmt.Errorf("failed to encrypt destination config id[%d]: %s", destination.ID, err) } destination.Config = eConfig - _, err = r.ormer.Insert(destination) + _, err = db.ormer.Insert(destination) return err } -func (r *DestinationORM) GetAll() 
([]*models.Destination, error) { +func (db *Database) ListDestinations() ([]*models.Destination, error) { var destinations []*models.Destination - _, err := r.ormer.QueryTable(r.TableName).RelatedSel().All(&destinations) + _, err := db.ormer.QueryTable(constants.TableNameMap[constants.DestinationTable]).RelatedSel().OrderBy(constants.OrderByUpdatedAtDesc).All(&destinations) if err != nil { - return nil, fmt.Errorf("failed to get all destinations: %s", err) + return nil, fmt.Errorf("failed to list destinations: %s", err) } // Decrypt config after reading - if err := r.decryptDestinationSliceConfigs(destinations); err != nil { - return nil, fmt.Errorf("failed to decrypt destination config: %s", err) + if err := db.decryptDestinationSliceConfigs(destinations); err != nil { + return nil, err } return destinations, nil } -func (r *DestinationORM) GetAllByProjectID(projectID string) ([]*models.Destination, error) { +func (db *Database) ListDestinationsByProjectID(projectID string) ([]*models.Destination, error) { var destinations []*models.Destination - _, err := r.ormer.QueryTable(r.TableName).Filter("project_id", projectID).RelatedSel().All(&destinations) + _, err := db.ormer.QueryTable(constants.TableNameMap[constants.DestinationTable]).Filter("project_id", projectID).RelatedSel().OrderBy(constants.OrderByUpdatedAtDesc).All(&destinations) if err != nil { - return nil, fmt.Errorf("failed to get all destinations by project_id[%s]: %s", projectID, err) + return nil, fmt.Errorf("failed to list destinations project_id[%s]: %s", projectID, err) } // Decrypt config after reading - if err := r.decryptDestinationSliceConfigs(destinations); err != nil { - return nil, fmt.Errorf("failed to decrypt destination config: %s", err) + if err := db.decryptDestinationSliceConfigs(destinations); err != nil { + return nil, err } return destinations, nil } -func (r *DestinationORM) GetByID(id int) (*models.Destination, error) { +func (db *Database) GetDestinationByID(id int) (*models.Destination, error) { destination := &models.Destination{ID: id} - err := r.ormer.Read(destination) + err := db.ormer.Read(destination) if err != nil { - return nil, fmt.Errorf("failed to get destination by ID: %s", err) + return nil, fmt.Errorf("failed to get destination id[%d]: %s", id, err) } // Decrypt config after reading dConfig, err := utils.Decrypt(destination.Config) if err != nil { - return nil, fmt.Errorf("failed to decrypt config for destination[%d]: %s", destination.ID, err) + return nil, fmt.Errorf("failed to decrypt destination config id[%d]: %s", destination.ID, err) } destination.Config = dConfig return destination, nil } -func (r *DestinationORM) Update(destination *models.Destination) error { - destination.UpdatedAt = time.Now() - +func (db *Database) UpdateDestination(destination *models.Destination) error { // Encrypt config before saving eConfig, err := utils.Encrypt(destination.Config) if err != nil { - return fmt.Errorf("failed to encrypt destination config: %s", err) + return fmt.Errorf("failed to encrypt destination[%d] config: %s", destination.ID, err) } destination.Config = eConfig - _, err = r.ormer.Update(destination) + _, err = db.ormer.Update(destination) return err } -func (r *DestinationORM) Delete(id int) error { +func (db *Database) DeleteDestination(id int) error { destination := &models.Destination{ID: id} - _, err := r.ormer.Delete(destination) + // Use ORM's Delete method which will automatically handle the soft delete + // by setting the DeletedAt field due to the ORM tags in BaseModel + _, err 
:= db.ormer.Delete(destination) return err } - -// GetByNameAndType retrieves destinations by name, destType, and project ID -func (r *DestinationORM) GetByNameAndType(name, destType, projectID string) ([]*models.Destination, error) { - var destinations []*models.Destination - _, err := r.ormer.QueryTable(r.TableName). - Filter("name", name). - Filter("dest_type", destType). - Filter("project_id", projectID). - All(&destinations) - if err != nil { - return nil, fmt.Errorf("failed to get destination in project[%s] by name[%s] and type[%s]: %s", projectID, name, destType, err) - } - - // Decrypt config after reading - if err := r.decryptDestinationSliceConfigs(destinations); err != nil { - return nil, fmt.Errorf("failed to decrypt destination config: %s", err) - } - - return destinations, nil -} diff --git a/server/internal/database/job.go b/server/internal/database/job.go index 69132434..4f7559ed 100644 --- a/server/internal/database/job.go +++ b/server/internal/database/job.go @@ -2,37 +2,22 @@ package database import ( "fmt" - "time" "github.com/beego/beego/v2/client/orm" - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/models" - "github.com/datazip/olake-frontend/server/utils" + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/utils" ) -// JobORM handles database operations for jobs -type JobORM struct { - ormer orm.Ormer - TableName string -} - -// NewJobORM creates a new instance of JobORM -func NewJobORM() *JobORM { - return &JobORM{ - ormer: orm.NewOrm(), - TableName: constants.TableNameMap[constants.JobTable], - } -} - // decryptJobConfig decrypts Config fields in related Source and Destination -func (r *JobORM) decryptJobConfig(job *models.Job) error { +func (db *Database) decryptJobConfig(job *models.Job) error { // Decrypt Source Config if loaded // TODO: verify why source_id and dest_id coming nil, it must not nil if job.SourceID != nil { decryptedConfig, err := utils.Decrypt(job.SourceID.Config) if err != nil { - return fmt.Errorf("failed to decrypt source config: %s", err) + return fmt.Errorf("failed to decrypt source config job_id[%d] source_id[%d]: %s", job.ID, job.SourceID.ID, err) } job.SourceID.Config = decryptedConfig } @@ -41,7 +26,7 @@ func (r *JobORM) decryptJobConfig(job *models.Job) error { if job.DestID != nil { decryptedConfig, err := utils.Decrypt(job.DestID.Config) if err != nil { - return fmt.Errorf("failed to decrypt destination config: %s", err) + return fmt.Errorf("failed to decrypt destination config job_id[%d] dest_id[%d]: %s", job.ID, job.DestID.ID, err) } job.DestID.Config = decryptedConfig } @@ -50,179 +35,152 @@ func (r *JobORM) decryptJobConfig(job *models.Job) error { } // decryptJobSliceConfig decrypts related entities for a slice of jobs -func (r *JobORM) decryptJobSliceConfig(jobs []*models.Job) error { +func (db *Database) decryptJobSliceConfig(jobs []*models.Job) error { for _, job := range jobs { - if err := r.decryptJobConfig(job); err != nil { - return fmt.Errorf("failed to decrypt job config: %s", err) + if err := db.decryptJobConfig(job); err != nil { + return fmt.Errorf("failed to decrypt job config job_id[%d]: %s", job.ID, err) } } return nil } // Create a new job -func (r *JobORM) Create(job *models.Job) error { - _, err := r.ormer.Insert(job) +func (db *Database) CreateJob(job *models.Job) error { + _, err := db.ormer.Insert(job) return err } // GetAll 
retrieves all jobs -func (r *JobORM) GetAll() ([]*models.Job, error) { +func (db *Database) ListJobs() ([]*models.Job, error) { var jobs []*models.Job - _, err := r.ormer.QueryTable(r.TableName).RelatedSel().All(&jobs) + _, err := db.ormer.QueryTable(constants.TableNameMap[constants.JobTable]).RelatedSel().OrderBy(constants.OrderByUpdatedAtDesc).All(&jobs) if err != nil { - return nil, fmt.Errorf("failed to get all jobs: %s", err) + return nil, fmt.Errorf("failed to list jobs: %s", err) } // Decrypt related Source and Destination configs - if err := r.decryptJobSliceConfig(jobs); err != nil { - return nil, fmt.Errorf("failed to decrypt job config: %s", err) + if err := db.decryptJobSliceConfig(jobs); err != nil { + return nil, err } return jobs, nil } -// GetAllByProjectID retrieves all jobs for a specific project -func (r *JobORM) GetAllByProjectID(projectID string) ([]*models.Job, error) { +// ListJobsByProjectID retrieves all jobs belonging to a specific project, +// including related Source and Destination, sorted by latest update time. +func (db *Database) ListJobsByProjectID(projectID string) ([]*models.Job, error) { var jobs []*models.Job - // Query sources in the project - sourceTable := constants.TableNameMap[constants.SourceTable] - sources := []int{} - _, err := r.ormer.Raw(fmt.Sprintf(`SELECT id FROM %q WHERE project_id = ?`, sourceTable), projectID).QueryRows(&sources) - if err != nil { - return nil, fmt.Errorf("failed to get sources for project ID %s: %s", projectID, err) - } - - // Query destinations in the project - destTable := constants.TableNameMap[constants.DestinationTable] - destinations := []int{} - _, err = r.ormer.Raw(fmt.Sprintf(`SELECT id FROM %q WHERE project_id = ?`, destTable), projectID).QueryRows(&destinations) + // Directly query jobs filtered by project_id — since each job already stores project_id + _, err := db.ormer.QueryTable(constants.TableNameMap[constants.JobTable]). + Filter("project_id", projectID). + RelatedSel(). + OrderBy(constants.OrderByUpdatedAtDesc). 
+ All(&jobs) if err != nil { - return nil, fmt.Errorf("failed to get destinations for project ID %s: %s", projectID, err) - } - - // If no sources or destinations in the project, return empty array - if len(sources) == 0 && len(destinations) == 0 { - return jobs, nil + return nil, fmt.Errorf("failed to list jobs project_id[%s]: %s", projectID, err) } - // Build query - qs := r.ormer.QueryTable(r.TableName) - // Filter by sources or destinations from the project - if len(sources) > 0 { - qs = qs.Filter("source_id__in", sources) - } - - if len(destinations) > 0 { - qs = qs.Filter("dest_id__in", destinations) - } - - // Add RelatedSel to load the related Source and Destination objects - _, err = qs.RelatedSel().All(&jobs) - if err != nil { - return nil, fmt.Errorf("failed to get jobs with related data for project ID %s: %s", projectID, err) + // If project has no jobs, return empty slice (not nil) + if len(jobs) == 0 { + return []*models.Job{}, nil } // Decrypt related Source and Destination configs - if err := r.decryptJobSliceConfig(jobs); err != nil { - return nil, fmt.Errorf("failed to decrypt job config: %s", err) + if err := db.decryptJobSliceConfig(jobs); err != nil { + return nil, err } return jobs, nil } // GetByID retrieves a job by ID -func (r *JobORM) GetByID(id int, decrypt bool) (*models.Job, error) { +func (db *Database) GetJobByID(id int, decrypt bool) (*models.Job, error) { job := &models.Job{ID: id} - err := r.ormer.Read(job) + err := db.ormer.Read(job) if err != nil { - return nil, fmt.Errorf("failed to get job by ID: %s", err) + return nil, fmt.Errorf("failed to get job id[%d]: %s", id, err) } // Load related entities (Source, Destination, etc.) - _, err = r.ormer.LoadRelated(job, "SourceID") + _, err = db.ormer.LoadRelated(job, "SourceID") if err != nil { - return nil, fmt.Errorf("failed to get job by ID: %s", err) + return nil, fmt.Errorf("failed to load source entities job_id[%d]: %s", id, err) } - _, err = r.ormer.LoadRelated(job, "DestID") + + _, err = db.ormer.LoadRelated(job, "DestID") if err != nil { - return nil, fmt.Errorf("failed to get job by ID: %s", err) + return nil, fmt.Errorf("failed to load destination entities job_id[%d]: %s", id, err) } // Decrypt related Source and Destination configs if decrypt { - if err := r.decryptJobConfig(job); err != nil { - return nil, fmt.Errorf("failed to decrypt job config: %s", err) + if err := db.decryptJobConfig(job); err != nil { + return nil, err } } return job, nil } -// Update a job -func (r *JobORM) Update(job *models.Job) error { - job.UpdatedAt = time.Now() - _, err := r.ormer.Update(job) - return err -} - -// Delete a job -func (r *JobORM) Delete(id int) error { - job := &models.Job{ID: id} - _, err := r.ormer.Delete(job) - return err -} - -// GetBySourceID retrieves all jobs associated with a source ID -func (r *JobORM) GetBySourceID(sourceID int) ([]*models.Job, error) { +func (db *Database) GetJobsBySourceID(sourceIDs []int) ([]*models.Job, error) { var jobs []*models.Job - source := &models.Source{ID: sourceID} - - _, err := r.ormer.QueryTable(r.TableName). - Filter("source_id", source). - RelatedSel(). 
- All(&jobs) - if err != nil { - return nil, fmt.Errorf("failed to get jobs by source ID: %s", err) + if len(sourceIDs) == 0 { + return jobs, nil } - - // Decrypt related Source and Destination configs - if err := r.decryptJobSliceConfig(jobs); err != nil { - return nil, fmt.Errorf("failed to decrypt job config: %s", err) + _, err := db.ormer.QueryTable(constants.TableNameMap[constants.JobTable]).Filter("source_id__in", sourceIDs).RelatedSel().All(&jobs) + if err != nil { + return nil, err } - return jobs, nil } -// GetByDestinationID retrieves all jobs associated with a destination ID -func (r *JobORM) GetByDestinationID(destID int) ([]*models.Job, error) { +func (db *Database) GetJobsByDestinationID(destIDs []int) ([]*models.Job, error) { var jobs []*models.Job - dest := &models.Destination{ID: destID} - - _, err := r.ormer.QueryTable(r.TableName). - Filter("dest_id", dest). - RelatedSel(). - All(&jobs) + if len(destIDs) == 0 { + return jobs, nil + } + _, err := db.ormer.QueryTable(constants.TableNameMap[constants.JobTable]).Filter("dest_id__in", destIDs).RelatedSel().All(&jobs) if err != nil { - return nil, fmt.Errorf("failed to get jobs by destination ID: %s", err) + return nil, err } + return jobs, nil +} - // Decrypt related Source and Destination configs - if err := r.decryptJobSliceConfig(jobs); err != nil { - return nil, fmt.Errorf("failed to decrypt job config: %s", err) + +// Update a job +func (db *Database) UpdateJob(job *models.Job) error { + _, err := db.ormer.Update(job) + return err +} + +// DeactivateJobs deactivates multiple jobs by their IDs in a single query +func (db *Database) DeactivateJobs(ids []int) error { + if len(ids) == 0 { + return nil } - return jobs, nil + _, err := db.ormer.QueryTable(constants.TableNameMap[constants.JobTable]). + Filter("id__in", ids). + Update(orm.Params{ + "active": false, + }) + return err +} + +// Delete a job +func (db *Database) DeleteJob(id int) error { + _, err := db.ormer.Delete(&models.Job{ID: id}) + return err } // IsJobNameUnique checks if a job name is unique within a project in the jobs table. -func (r *JobORM) IsJobNameUnique(projectID, jobName string) (bool, error) { - count, err := r.ormer.QueryTable(r.TableName). +func (db *Database) IsJobNameUniqueInProject(projectID, jobName string) (bool, error) { + count, err := db.ormer.QueryTable(constants.TableNameMap[constants.JobTable]). Filter("name", jobName). Filter("project_id", projectID). 
Count() if err != nil { - return false, fmt.Errorf("failed to check job name uniqueness: %w", err) + return false, fmt.Errorf("failed to check job name uniqueness project_id[%s] job_name[%s]: %s", projectID, jobName, err) } return count == 0, nil } diff --git a/server/internal/database/source.go b/server/internal/database/source.go index 97932b24..273765ad 100644 --- a/server/internal/database/source.go +++ b/server/internal/database/source.go @@ -2,117 +2,79 @@ package database import ( "fmt" - "time" - "github.com/beego/beego/v2/client/orm" - - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/models" - "github.com/datazip/olake-frontend/server/utils" + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/utils" ) -// SourceORM handles database operations for sources -type SourceORM struct { - ormer orm.Ormer - TableName string -} - -func NewSourceORM() *SourceORM { - return &SourceORM{ - ormer: orm.NewOrm(), - TableName: constants.TableNameMap[constants.SourceTable], - } -} - // decryptSourceSliceConfigs decrypts config fields for a slice of sources -func (r *SourceORM) decryptSourceSliceConfigs(sources []*models.Source) error { +func (db *Database) decryptSourceSliceConfigs(sources []*models.Source) error { for _, source := range sources { dConfig, err := utils.Decrypt(source.Config) if err != nil { - return fmt.Errorf("failed to decrypt source config: %s", err) + return fmt.Errorf("failed to decrypt source config id[%d]: %s", source.ID, err) } source.Config = dConfig } return nil } -func (r *SourceORM) Create(source *models.Source) error { +func (db *Database) CreateSource(source *models.Source) error { // Encrypt config before saving eConfig, err := utils.Encrypt(source.Config) if err != nil { - return fmt.Errorf("failed to encrypt source config: %s", err) + return fmt.Errorf("failed to encrypt source config id[%d]: %s", source.ID, err) } source.Config = eConfig - _, err = r.ormer.Insert(source) + _, err = db.ormer.Insert(source) return err } -func (r *SourceORM) GetAll() ([]*models.Source, error) { +func (db *Database) ListSources() ([]*models.Source, error) { var sources []*models.Source - _, err := r.ormer.QueryTable(r.TableName).RelatedSel().All(&sources) + _, err := db.ormer.QueryTable(constants.TableNameMap[constants.SourceTable]).RelatedSel().OrderBy(constants.OrderByUpdatedAtDesc).All(&sources) if err != nil { - return nil, fmt.Errorf("failed to get all sources: %s", err) + return nil, fmt.Errorf("failed to list sources: %s", err) } // Decrypt config after reading - if err := r.decryptSourceSliceConfigs(sources); err != nil { - return nil, fmt.Errorf("failed to decrypt source config: %s", err) + if err := db.decryptSourceSliceConfigs(sources); err != nil { + return nil, err } return sources, nil } -func (r *SourceORM) GetByID(id int) (*models.Source, error) { +func (db *Database) GetSourceByID(id int) (*models.Source, error) { source := &models.Source{ID: id} - err := r.ormer.Read(source) + err := db.ormer.Read(source) if err != nil { - return nil, fmt.Errorf("failed to get source by id[%d]: %s", id, err) + return nil, fmt.Errorf("failed to get source id[%d]: %s", id, err) } // Decrypt config after reading dConfig, err := utils.Decrypt(source.Config) if err != nil { - return nil, fmt.Errorf("failed to decrypt source config by id[%d]: %s", source.ID, err) + return nil, fmt.Errorf("failed to decrypt source 
config id[%d]: %s", source.ID, err) } source.Config = dConfig return source, nil } -func (r *SourceORM) Update(source *models.Source) error { - // TODO: remove all code managed db timestamps - source.UpdatedAt = time.Now() +func (db *Database) UpdateSource(source *models.Source) error { // Encrypt config before saving eConfig, err := utils.Encrypt(source.Config) if err != nil { - return fmt.Errorf("failed to encrypt source config: %s", err) + return fmt.Errorf("failed to encrypt source config id[%d]: %s", source.ID, err) } source.Config = eConfig - _, err = r.ormer.Update(source) + _, err = db.ormer.Update(source) return err } -func (r *SourceORM) Delete(id int) error { +func (db *Database) DeleteSource(id int) error { source := &models.Source{ID: id} - _, err := r.ormer.Delete(source) + _, err := db.ormer.Delete(source) return err } - -// GetByNameAndType retrieves sources by name, type, and project ID -func (r *SourceORM) GetByNameAndType(name, sourceType, projectIDStr string) ([]*models.Source, error) { - var sources []*models.Source - _, err := r.ormer.QueryTable(r.TableName). - Filter("name", name). - Filter("type", sourceType). - Filter("project_id", projectIDStr). - All(&sources) - if err != nil { - return nil, fmt.Errorf("failed to get source by name: %s and type: %s and project_id: %s: %s", name, sourceType, projectIDStr, err) - } - - // Decrypt config after reading - if err := r.decryptSourceSliceConfigs(sources); err != nil { - return nil, fmt.Errorf("failed to decrypt source config: %s", err) - } - - return sources, nil -} diff --git a/server/internal/database/user.go b/server/internal/database/user.go index baac82aa..9cdf70fb 100644 --- a/server/internal/database/user.go +++ b/server/internal/database/user.go @@ -2,68 +2,52 @@ package database import ( "fmt" - "time" - "github.com/beego/beego/v2/client/orm" "golang.org/x/crypto/bcrypt" - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/models" + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/models" ) -// UserORM handles database operations -type UserORM struct { - ormer orm.Ormer - TableName string -} - -func NewUserORM() *UserORM { - return &UserORM{ - ormer: orm.NewOrm(), - TableName: constants.TableNameMap[constants.UserTable], - } -} - -func (r *UserORM) FindByUsername(username string) (*models.User, error) { +func (db *Database) GetUserByUsername(username string) (*models.User, error) { var user models.User - err := r.ormer.QueryTable(r.TableName).Filter("username", username).One(&user) + err := db.ormer.QueryTable(constants.TableNameMap[constants.UserTable]).Filter("username", username).One(&user) return &user, err } -func (r *UserORM) ComparePassword(hashedPassword, plainPassword string) error { +func (db *Database) CompareUserPassword(hashedPassword, plainPassword string) error { return bcrypt.CompareHashAndPassword([]byte(hashedPassword), []byte(plainPassword)) } -func (r *UserORM) Create(user *models.User) error { - exists := r.ormer.QueryTable(r.TableName).Filter("username", user.Username).Exist() +func (db *Database) CreateUser(user *models.User) error { + exists := db.ormer.QueryTable(constants.TableNameMap[constants.UserTable]).Filter("username", user.Username).Exist() if exists { return fmt.Errorf("username already exists") } - _, err := r.ormer.Insert(user) + _, err := db.ormer.Insert(user) return err } -func (r *UserORM) GetAll() ([]*models.User, error) { +func (db *Database) 
ListUsers() ([]*models.User, error) { var users []*models.User - _, err := r.ormer.QueryTable(r.TableName).All(&users) + _, err := db.ormer.QueryTable(constants.TableNameMap[constants.UserTable]).All(&users) return users, err } -func (r *UserORM) GetByID(id int) (*models.User, error) { +func (db *Database) GetUserByID(id int) (*models.User, error) { user := &models.User{ID: id} - err := r.ormer.Read(user) + err := db.ormer.Read(user) return user, err } -func (r *UserORM) Update(user *models.User) error { - user.UpdatedAt = time.Now() - _, err := r.ormer.Update(user) +func (db *Database) UpdateUser(user *models.User) error { + _, err := db.ormer.Update(user) return err } -func (r *UserORM) Delete(id int) error { +func (db *Database) DeleteUser(id int) error { user := &models.User{ID: id} - _, err := r.ormer.Delete(user) + _, err := db.ormer.Delete(user) return err } diff --git a/server/internal/docker/runner.go b/server/internal/docker/runner.go deleted file mode 100644 index d123e8df..00000000 --- a/server/internal/docker/runner.go +++ /dev/null @@ -1,497 +0,0 @@ -package docker - -import ( - "context" - "crypto/sha256" - "encoding/json" - "fmt" - "os" - "os/exec" - "path/filepath" - "strconv" - "strings" - - "github.com/beego/beego/v2/core/logs" - "github.com/beego/beego/v2/server/web" - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/database" - "github.com/datazip/olake-frontend/server/internal/models" - "github.com/datazip/olake-frontend/server/internal/telemetry" - "github.com/datazip/olake-frontend/server/utils" - "golang.org/x/mod/semver" -) - -// Constants -const ( - DefaultDirPermissions = 0755 - DefaultFilePermissions = 0644 -) - -// Command represents a Docker command type -type Command string - -const ( - Discover Command = "discover" - Spec Command = "spec" - Check Command = "check" - Sync Command = "sync" -) - -// File configuration for different operations -type FileConfig struct { - Name string - Data string -} - -// Runner is responsible for executing Docker commands -type Runner struct { - WorkingDir string - anonymousID string -} - -// NewRunner creates a new Docker runner -func NewRunner(workingDir string) *Runner { - if err := utils.CreateDirectory(workingDir, DefaultDirPermissions); err != nil { - logs.Critical("Failed to create working directory %s: %v", workingDir, err) - } - - return &Runner{ - WorkingDir: workingDir, - anonymousID: telemetry.GetTelemetryUserID(), - } -} - -// GetDefaultConfigDir returns the default directory for storing config files -func GetDefaultConfigDir() string { - return constants.DefaultConfigDir -} - -// setupWorkDirectory creates a working directory and returns the full path -func (r *Runner) setupWorkDirectory(subDir string) (string, error) { - workDir := filepath.Join(r.WorkingDir, subDir) - if err := utils.CreateDirectory(workDir, DefaultDirPermissions); err != nil { - return "", fmt.Errorf("failed to create work directory: %v", err) - } - return workDir, nil -} - -// writeConfigFiles writes multiple configuration files to the specified directory -func (r *Runner) writeConfigFiles(workDir string, configs []FileConfig) error { - for _, config := range configs { - filePath := filepath.Join(workDir, config.Name) - if err := utils.WriteFile(filePath, []byte(config.Data), DefaultFilePermissions); err != nil { - return fmt.Errorf("failed to write %s: %v", config.Name, err) - } - } - return nil -} - -// GetDockerImageName constructs a Docker image name based on source type 
and version -func (r *Runner) GetDockerImageName(sourceType, version string) string { - return fmt.Sprintf("olakego/source-%s:%s", sourceType, version) -} - -// ExecuteDockerCommand executes a Docker command with the given parameters -func (r *Runner) ExecuteDockerCommand(ctx context.Context, containerName, flag string, command Command, sourceType, version, configPath string, additionalArgs ...string) ([]byte, error) { - outputDir := filepath.Dir(configPath) - if err := utils.CreateDirectory(outputDir, DefaultDirPermissions); err != nil { - return nil, err - } - - dockerArgs := r.buildDockerArgs(ctx, containerName, flag, command, sourceType, version, configPath, outputDir, additionalArgs...) - if len(dockerArgs) == 0 { - return nil, fmt.Errorf("failed to build docker args") - } - - logs.Info("Running Docker command: docker %s\n", strings.Join(dockerArgs, " ")) - - dockerCmd := exec.CommandContext(ctx, "docker", dockerArgs...) - output, err := dockerCmd.CombinedOutput() - - logs.Info("Docker command output: %s\n", string(output)) - - if err != nil { - if exitErr, ok := err.(*exec.ExitError); ok { - return nil, fmt.Errorf("docker command failed with exit status %d", exitErr.ExitCode()) - } - return nil, err - } - - return output, nil -} - -// buildDockerArgs constructs Docker command arguments -func (r *Runner) buildDockerArgs(ctx context.Context, containerName, flag string, command Command, sourceType, version, configPath, outputDir string, additionalArgs ...string) []string { - hostOutputDir := r.getHostOutputDir(outputDir) - - repositoryBase, err := web.AppConfig.String("CONTAINER_REGISTRY_BASE") - if err != nil { - logs.Critical("failed to get CONTAINER_REGISTRY_BASE: %s", err) - return nil - } - imageName := r.GetDockerImageName(sourceType, version) - - // If using ECR, ensure login before run - if strings.Contains(repositoryBase, "ecr") { - imageName = fmt.Sprintf("%s/%s", repositoryBase, imageName) - accountID, region, _, err := utils.ParseECRDetails(imageName) - if err != nil { - logs.Critical("failed to parse ECR details: %s", err) - return nil - } - if err := utils.DockerLoginECR(ctx, region, accountID); err != nil { - logs.Critical("failed to login to ECR: %s", err) - return nil - } - } - - // base docker args - dockerArgs := []string{"run", "--name", containerName} - - if hostOutputDir != "" { - dockerArgs = append(dockerArgs, "-v", fmt.Sprintf("%s:/mnt/config", hostOutputDir)) - } - - for key, value := range utils.GetWorkerEnvVars() { - dockerArgs = append(dockerArgs, "-e", fmt.Sprintf("%s=%s", key, value)) - } - - dockerArgs = append(dockerArgs, imageName, string(command)) - - if flag != "" { - dockerArgs = append(dockerArgs, fmt.Sprintf("--%s", flag)) - } - - if configPath != "" { - dockerArgs = append(dockerArgs, fmt.Sprintf("/mnt/config/%s", filepath.Base(configPath))) - } - - if encryptionKey := os.Getenv(constants.EncryptionKey); encryptionKey != "" { - dockerArgs = append(dockerArgs, "--encryption-key", encryptionKey) - } - - return append(dockerArgs, additionalArgs...) 
-} - -// getHostOutputDir determines the host output directory path -func (r *Runner) getHostOutputDir(outputDir string) string { - if persistentDir := os.Getenv("PERSISTENT_DIR"); persistentDir != "" { - hostOutputDir := strings.Replace(outputDir, constants.DefaultConfigDir, persistentDir, 1) - logs.Info("hostOutputDir %s\n", hostOutputDir) - return hostOutputDir - } - return outputDir -} - -func (r *Runner) FetchSpec(ctx context.Context, destinationType, sourceType, version, workflowID string) (models.SpecOutput, error) { - flag := utils.Ternary(destinationType != "", "destination-type", "").(string) - dockerArgs := r.buildDockerArgs(ctx, workflowID, flag, Spec, sourceType, version, "", "", destinationType) - - cmd := exec.CommandContext(ctx, "docker", dockerArgs...) - logs.Info("Running Docker command: docker %s\n", strings.Join(dockerArgs, " ")) - output, err := cmd.CombinedOutput() - if err != nil { - return models.SpecOutput{}, fmt.Errorf("docker command failed: %v\nOutput: %s", err, string(output)) - } - spec, err := utils.ExtractJSON(string(output)) - if err != nil { - return models.SpecOutput{}, fmt.Errorf("failed to parse spec: %s", string(output)) - } - return models.SpecOutput{Spec: spec}, nil -} - -// TestConnection runs the check command and returns connection status -func (r *Runner) TestConnection(ctx context.Context, flag, sourceType, version, config, workflowID string) (map[string]interface{}, error) { - workDir, err := r.setupWorkDirectory(workflowID) - if err != nil { - return nil, err - } - - configs := []FileConfig{ - {Name: "config.json", Data: config}, - {Name: "user_id.txt", Data: r.anonymousID}, - } - - if err := r.writeConfigFiles(workDir, configs); err != nil { - return nil, err - } - - configPath := filepath.Join(workDir, "config.json") - output, err := r.ExecuteDockerCommand(ctx, workflowID, flag, Check, sourceType, version, configPath) - if err != nil { - return nil, err - } - - logs.Info("check command output: %s\n", string(output)) - - logMsg, err := utils.ExtractJSON(string(output)) - if err != nil { - return nil, err - } - - connectionStatus, ok := logMsg["connectionStatus"].(map[string]interface{}) - if !ok || connectionStatus == nil { - return nil, fmt.Errorf("connection status not found") - } - - status, statusOk := connectionStatus["status"].(string) - message, _ := connectionStatus["message"].(string) // message is optional - if !statusOk { - return nil, fmt.Errorf("connection status not found") - } - - return map[string]interface{}{ - "message": message, - "status": status, - }, nil -} - -// GetCatalog runs the discover command and returns catalog data -func (r *Runner) GetCatalog(ctx context.Context, sourceType, version, config, workflowID, streamsConfig, jobName string) (map[string]interface{}, error) { - workDir, err := r.setupWorkDirectory(workflowID) - if err != nil { - return nil, err - } - configs := []FileConfig{ - {Name: "config.json", Data: config}, - {Name: "streams.json", Data: streamsConfig}, - {Name: "user_id.txt", Data: r.anonymousID}, - } - - if err := r.writeConfigFiles(workDir, configs); err != nil { - return nil, err - } - - configPath := filepath.Join(workDir, "config.json") - catalogPath := filepath.Join(workDir, "streams.json") - var catalogsArgs []string - if streamsConfig != "" { - catalogsArgs = append(catalogsArgs, "--catalog", "/mnt/config/streams.json") - } - if jobName != "" && semver.Compare(version, "v0.2.0") >= 0 { - catalogsArgs = append(catalogsArgs, "--destination-database-prefix", jobName) - } - _, err = 
r.ExecuteDockerCommand(ctx, workflowID, "config", Discover, sourceType, version, configPath, catalogsArgs...) - if err != nil { - return nil, err - } - - // Simplified JSON parsing - just parse if exists, return error if not - return utils.ParseJSONFile(catalogPath) -} - -// RunSync runs the sync command to transfer data from source to destination -func (r *Runner) RunSync(ctx context.Context, jobID int, workflowID string) (map[string]interface{}, error) { - // Deterministic container name to enable adoption across retries - containerName := WorkflowHash(workflowID) - - // Setup work dir and configs - workDir, err := r.setupWorkDirectory(containerName) - if err != nil { - logs.Error("workflowID %s: failed to setup work directory: %s", workflowID, err) - return nil, err - } - - // Marker to indicate we have launched once; prevents relaunch after retries - launchedMarker := filepath.Join(workDir, "logs") - - // Inspect container state - state := getContainerState(ctx, containerName, workflowID) - - // 1) If container is running, adopt and wait for completion - if state.Exists && state.Running { - logs.Info("workflowID %s: adopting running container %s", workflowID, containerName) - if err := waitContainer(ctx, containerName, workflowID); err != nil { - logs.Error("workflowID %s: container wait failed: %s", workflowID, err) - return nil, err - } - state = getContainerState(ctx, containerName, workflowID) - } - - // 2) If container exists and exited, treat as finished: cleanup and return status - if state.Exists && !state.Running && state.ExitCode != nil { - logs.Info("workflowID %s: container %s exited with code %d", workflowID, containerName, *state.ExitCode) - if *state.ExitCode == 0 { - return map[string]interface{}{"status": "completed"}, nil - } - // Return typed error so policy can decide retry vs. 
fail-fast - return nil, fmt.Errorf("workflowID %s: container %s exit %d", workflowID, containerName, *state.ExitCode) - } - - // 3) First launch path: only if we never launched and nothing is running - if _, err := os.Stat(launchedMarker); os.IsNotExist(err) { - logs.Info("workflowID %s: first launch path, preparing configs", workflowID) - jobORM := database.NewJobORM() - job, err := jobORM.GetByID(jobID, false) - if err != nil { - logs.Error("workflowID %s: failed to fetch job %d: %s", workflowID, jobID, err) - return nil, err - } - configs := []FileConfig{ - {Name: "config.json", Data: job.SourceID.Config}, - {Name: "streams.json", Data: job.StreamsConfig}, - {Name: "writer.json", Data: job.DestID.Config}, - {Name: "state.json", Data: job.State}, - {Name: "user_id.txt", Data: r.anonymousID}, - } - if err := r.writeConfigFiles(workDir, configs); err != nil { - logs.Error("workflowID %s: failed to write config files: %s", workflowID, err) - return nil, err - } - - configPath := filepath.Join(workDir, "config.json") - logs.Info("workflowID %s: executing docker container %s", workflowID, containerName) - - if _, err = r.ExecuteDockerCommand( - ctx, - containerName, - "config", - Sync, - job.SourceID.Type, - job.SourceID.Version, - configPath, - "--catalog", "/mnt/config/streams.json", - "--destination", "/mnt/config/writer.json", - "--state", "/mnt/config/state.json", - ); err != nil { - logs.Error("workflowID %s: docker execution failed: %s", workflowID, err) - return nil, err - } - - logs.Info("workflowID %s: container %s completed successfully", workflowID, containerName) - return map[string]interface{}{"status": "completed"}, nil - } - // Skip if container is not running, was already launched (logs exist), and no new run is needed. - logs.Info("workflowID %s: container %s already handled, skipping launch", workflowID, containerName) - return map[string]interface{}{"status": "skipped"}, nil -} - -type ContainerState struct { - Exists bool - Running bool - ExitCode *int -} - -func getContainerState(ctx context.Context, name, workflowID string) ContainerState { - // docker inspect returns fields if exists - cmd := exec.CommandContext(ctx, "docker", "inspect", "-f", "{{.State.Status}} {{.State.Running}} {{.State.ExitCode}}", name) - out, err := cmd.CombinedOutput() - if err != nil { - // treat not found as non-existent - logs.Warn("workflowID %s: docker inspect failed for %s: %s, output: %s", workflowID, name, err, string(out)) - return ContainerState{Exists: false} - } - // Split Docker inspect output into fields: status, running flag, and exit code - // Example: "exited false 137" → parts[0]="exited", parts[1]="false", parts[2]="137" - parts := strings.Fields(strings.TrimSpace(string(out))) - if len(parts) < 3 { - return ContainerState{Exists: false} - } - // Docker .State.Status can be "created", "running", "paused", "restarting", "removing", "exited", or "dead"; we only handle running vs exited/dead. 
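The state parsing that follows can be exercised in isolation. This sketch mirrors the same field-splitting rules on example `docker inspect -f '{{.State.Status}} {{.State.Running}} {{.State.ExitCode}}'` outputs; the real code additionally treats an inspect failure as "container does not exist".

```go
package main

import (
	"fmt"
	"strconv"
	"strings"
)

// parseInspect mirrors the parsing logic above: an exit code is only reported
// when the container is not running and its status is "exited" or "dead".
func parseInspect(out string) (running bool, exitCode *int) {
	parts := strings.Fields(strings.TrimSpace(out))
	if len(parts) < 3 {
		return false, nil
	}
	running = parts[1] == "true"
	if !running && (parts[0] == "exited" || parts[0] == "dead") {
		if code, err := strconv.Atoi(parts[2]); err == nil {
			exitCode = &code
		}
	}
	return running, exitCode
}

func main() {
	for _, out := range []string{"running true 0", "exited false 137", "created false 0"} {
		r, ec := parseInspect(out)
		if ec != nil {
			fmt.Printf("%q -> running=%v exit=%d\n", out, r, *ec)
		} else {
			fmt.Printf("%q -> running=%v exit=<none>\n", out, r)
		}
	}
}
```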
- status := parts[0] - running := parts[1] == "true" - var ec *int - if !running && (status == "exited" || status == "dead") { - if code, convErr := strconv.Atoi(parts[2]); convErr == nil { - ec = &code - } - } - return ContainerState{Exists: true, Running: running, ExitCode: ec} -} - -func waitContainer(ctx context.Context, name, workflowID string) error { - // docker wait prints exit code; validate non-zero as error - cmd := exec.CommandContext(ctx, "docker", "wait", name) - out, err := cmd.CombinedOutput() - if err != nil { - logs.Error("workflowID %s: docker wait failed for %s: %s, output: %s", workflowID, name, err, string(out)) - return fmt.Errorf("docker wait failed: %s", err) - } - strOut := strings.TrimSpace(string(out)) - code, convErr := strconv.Atoi(strOut) - if convErr != nil { - logs.Error("workflowID %s: failed to parse exit code from docker wait output %q: %s", workflowID, strOut, convErr) - return fmt.Errorf("failed to parse exit code: %s", convErr) - } - - if code != 0 { - return fmt.Errorf("workflowID %s: container %s exited with code %d", workflowID, name, code) - } - return nil -} - -// StopContainer stops a container by name, falling back to kill if needed. -func StopContainer(ctx context.Context, workflowID string) error { - containerName := WorkflowHash(workflowID) - logs.Info("workflowID %s: stop request received for container %s", workflowID, containerName) - - if strings.TrimSpace(containerName) == "" { - logs.Warn("workflowID %s: empty container name", workflowID) - return fmt.Errorf("empty container name") - } - - stopCmd := exec.CommandContext(ctx, "docker", "stop", "-t", "5", containerName) - if out, err := stopCmd.CombinedOutput(); err != nil { - logs.Warn("workflowID %s: docker stop failed for %s: %s, output: %s", workflowID, containerName, err, string(out)) - killCmd := exec.CommandContext(ctx, "docker", "kill", containerName) - if kout, kerr := killCmd.CombinedOutput(); kerr != nil { - logs.Error("workflowID %s: docker kill failed for %s: %s, output: %s", workflowID, containerName, kerr, string(kout)) - return fmt.Errorf("docker kill failed: %s", kerr) - } - } - - rmCmd := exec.CommandContext(ctx, "docker", "rm", "-f", containerName) - if rmOut, rmErr := rmCmd.CombinedOutput(); rmErr != nil { - logs.Warn("workflowID %s: docker rm failed for %s: %s, output: %s", workflowID, containerName, rmErr, string(rmOut)) - } else { - logs.Info("workflowID %s: container %s removed successfully", workflowID, containerName) - } - - return nil -} - -// PersistJobStateFromFile reads the state JSON file and updates the job state -func (r *Runner) PersistJobStateFromFile(jobID int, workflowID string) error { - hashWorkflowID := WorkflowHash(workflowID) - workDir, err := r.setupWorkDirectory(hashWorkflowID) - if err != nil { - logs.Error("workflowID %s: failed to setup work directory: %s", workflowID, err) - return err - } - - statePath := filepath.Join(workDir, "state.json") - state, err := utils.ParseJSONFile(statePath) - if err != nil { - logs.Error("workflowID %s: failed to parse state file %s: %s", workflowID, statePath, err) - return err - } - - jobORM := database.NewJobORM() - job, err := jobORM.GetByID(jobID, false) - if err != nil { - logs.Error("workflowID %s: failed to fetch job %d: %s", workflowID, jobID, err) - return err - } - - stateJSON, err := json.Marshal(state) - if err != nil { - logs.Error("workflowID %s: failed to marshal state: %s", workflowID, err) - return err - } - - job.State = string(stateJSON) - job.Active = true - - if err := jobORM.Update(job); 
err != nil { - logs.Error("workflowID %s: failed to update job %d: %s", workflowID, jobID, err) - return err - } - - logs.Info("workflowID %s: job state persisted successfully for jobID %d", workflowID, jobID) - return nil -} - -// WorkflowHash returns a deterministic hash string for a given workflowID -func WorkflowHash(workflowID string) string { - return fmt.Sprintf("%x", sha256.Sum256([]byte(workflowID))) -} diff --git a/server/internal/handlers/auth.go b/server/internal/handlers/auth.go index ffee8e5e..52bb6612 100644 --- a/server/internal/handlers/auth.go +++ b/server/internal/handlers/auth.go @@ -1,118 +1,119 @@ package handlers import ( - "context" - "encoding/json" + "errors" + "fmt" "net/http" - "strings" "github.com/beego/beego/v2/server/web" - "golang.org/x/crypto/bcrypt" - - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/database" - "github.com/datazip/olake-frontend/server/internal/models" - "github.com/datazip/olake-frontend/server/internal/telemetry" - "github.com/datazip/olake-frontend/server/utils" + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/internal/models/dto" + "github.com/datazip-inc/olake-ui/server/utils" + "github.com/datazip-inc/olake-ui/server/utils/logger" + "github.com/datazip-inc/olake-ui/server/utils/telemetry" ) -type AuthHandler struct { - web.Controller - userORM *database.UserORM -} - -func (c *AuthHandler) Prepare() { - c.userORM = database.NewUserORM() -} - // @router /login [post] -func (c *AuthHandler) Login() { - var req models.LoginRequest - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") +func (h *Handler) Login() { + var req dto.LoginRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, constants.ValidationInvalidRequestFormat, err) return } - user, err := c.userORM.FindByUsername(req.Username) + logger.Debugf("Login initiated username[%s]", req.Username) + + user, err := h.etl.Login(h.Ctx.Request.Context(), req.Username, req.Password) if err != nil { - ErrorResponse := "Invalid credentials" - if strings.Contains(err.Error(), "no row found") { - ErrorResponse = "user not found, sign up first" + switch { + case errors.Is(err, constants.ErrUserNotFound): + utils.ErrorResponse(&h.Controller, http.StatusUnauthorized, fmt.Sprintf("user not found, sign up first: %s", err), err) + case errors.Is(err, constants.ErrInvalidCredentials): + utils.ErrorResponse(&h.Controller, http.StatusUnauthorized, fmt.Sprintf("Invalid credentials: %s", err), err) + default: + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("Login failed: %s", err), err) } - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, ErrorResponse) - return - } - - if err := c.userORM.ComparePassword(user.Password, req.Password); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid credentials") return } // check if session is enabled if web.BConfig.WebConfig.Session.SessionOn { - _ = c.SetSession(constants.SessionUserID, user.ID) + _ = h.SetSession(constants.SessionUserID, user.ID) } - telemetry.TrackUserLogin(context.Background(), user) - - utils.SuccessResponse(&c.Controller, map[string]interface{}{ + utils.SuccessResponse(&h.Controller, "login 
successful", map[string]interface{}{ "username": user.Username, }) } // @router /checkauth [get] -func (c *AuthHandler) CheckAuth() { - if userID := c.GetSession(constants.SessionUserID); userID == nil { - utils.ErrorResponse(&c.Controller, http.StatusUnauthorized, "Not authenticated") +func (h *Handler) CheckAuth() { + userID := h.GetSession(constants.SessionUserID) + if userID == nil { + utils.ErrorResponse(&h.Controller, http.StatusUnauthorized, "Not authenticated", errors.New("not authenticated")) return } - utils.SuccessResponse(&c.Controller, models.LoginResponse{ - Message: "Authenticated", - Success: true, - }) + logger.Debugf("Check auth initiated user_id[%v]", userID) + + // Optional: Validate that the user still exists in the database + if userIDInt, ok := userID.(int); ok { + if err := h.etl.ValidateUser(userIDInt); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusUnauthorized, fmt.Sprintf("Invalid session: %s", err), err) + return + } + } + + utils.SuccessResponse(&h.Controller, "authenticated successfully", nil) } // @router /logout [post] -func (c *AuthHandler) Logout() { - _ = c.DestroySession() - utils.SuccessResponse(&c.Controller, models.LoginResponse{ - Message: "Logged out successfully", - Success: true, - }) -} +func (h *Handler) Logout() { + userID := h.GetSession(constants.SessionUserID) + logger.Debugf("Logout initiated user_id[%v]", userID) -// @router /signup [post] -func (c *AuthHandler) Signup() { - var req models.User - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") + err := h.DestroySession() + if err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to destroy session: %s", err), err) return } - // Hash password - hashedPassword, err := bcrypt.GenerateFromPassword([]byte(req.Password), bcrypt.DefaultCost) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to process password") + utils.SuccessResponse(&h.Controller, "logout successful", nil) +} + +// @router /signup [post] +func (h *Handler) Signup() { + var req models.User + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, constants.ValidationInvalidRequestFormat, err) return } - req.Password = string(hashedPassword) - if err := c.userORM.Create(&req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusConflict, "Username already exists") + if err := h.etl.Signup(h.Ctx.Request.Context(), &req); err != nil { + switch { + case errors.Is(err, constants.ErrUserAlreadyExists): + utils.ErrorResponse(&h.Controller, http.StatusConflict, fmt.Sprintf("Username already exists: %s", err), err) + case errors.Is(err, constants.ErrPasswordProcessing): + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to process password: %s", err), err) + default: + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to create user: %s", err), err) + } return } - utils.SuccessResponse(&c.Controller, map[string]interface{}{ + utils.SuccessResponse(&h.Controller, "user created successfully", map[string]interface{}{ "email": req.Email, "username": req.Username, }) } // @router /telemetry-id [get] -func (c *AuthHandler) GetTelemetryID() { +func (h *Handler) GetTelemetryID() { + logger.Infof("Get telemetry ID initiated") + telemetryID := telemetry.GetTelemetryUserID() 
- utils.SuccessResponse(&c.Controller, map[string]interface{}{ + utils.SuccessResponse(&h.Controller, "telemetry ID fetched successfully", map[string]interface{}{ telemetry.TelemetryUserIDFile: string(telemetryID), }) } diff --git a/server/internal/handlers/destination.go b/server/internal/handlers/destination.go index d1c46664..c3012323 100644 --- a/server/internal/handlers/destination.go +++ b/server/internal/handlers/destination.go @@ -1,339 +1,213 @@ package handlers import ( - "context" - "encoding/json" + "errors" "fmt" "net/http" - "time" - "github.com/beego/beego/v2/core/logs" - "github.com/beego/beego/v2/server/web" - - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/database" - "github.com/datazip/olake-frontend/server/internal/models" - "github.com/datazip/olake-frontend/server/internal/telemetry" - "github.com/datazip/olake-frontend/server/internal/temporal" - "github.com/datazip/olake-frontend/server/utils" + "github.com/datazip-inc/olake-ui/server/internal/models/dto" + "github.com/datazip-inc/olake-ui/server/utils" + "github.com/datazip-inc/olake-ui/server/utils/logger" ) -type DestHandler struct { - web.Controller - destORM *database.DestinationORM - jobORM *database.JobORM - userORM *database.UserORM - tempClient *temporal.Client -} - -func (c *DestHandler) Prepare() { - c.destORM = database.NewDestinationORM() - c.jobORM = database.NewJobORM() - c.userORM = database.NewUserORM() - var err error - c.tempClient, err = temporal.NewClient() +// @router /project/:projectid/destinations [get] +func (h *Handler) ListDestinations() { + projectID, err := GetProjectIDFromPath(&h.Controller) if err != nil { - logs.Error("Failed to create Temporal client: %v", err) + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return } -} -// @router /project/:projectid/destinations [get] -func (c *DestHandler) GetAllDestinations() { - projectIDStr := c.Ctx.Input.Param(":projectid") - destinations, err := c.destORM.GetAllByProjectID(projectIDStr) + items, err := h.etl.ListDestinations(h.Ctx.Request.Context(), projectID) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to retrieve destinations") + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to get destinations: %s", err), err) return } - destItems := make([]models.DestinationDataItem, 0, len(destinations)) - for _, dest := range destinations { - item := models.DestinationDataItem{ - ID: dest.ID, - Name: dest.Name, - Type: dest.DestType, - Version: dest.Version, - Config: dest.Config, - CreatedAt: dest.CreatedAt.Format(time.RFC3339), - UpdatedAt: dest.UpdatedAt.Format(time.RFC3339), - } - - setUsernames(&item.CreatedBy, &item.UpdatedBy, dest.CreatedBy, dest.UpdatedBy) - - jobs, err := c.jobORM.GetByDestinationID(dest.ID) - var success bool - item.Jobs, success = buildJobDataItems(jobs, err, projectIDStr, "destination", c.tempClient, &c.Controller) - if !success { - return // Error occurred in buildJobDataItems - } - - destItems = append(destItems, item) - } - - utils.SuccessResponse(&c.Controller, destItems) + utils.SuccessResponse(&h.Controller, "Destinations listed successfully", items) } // @router /project/:projectid/destinations [post] -func (c *DestHandler) CreateDestination() { - // Get project ID from path - projectIDStr := c.Ctx.Input.Param(":projectid") +func (h *Handler) CreateDestination() { + userID := 
GetUserIDFromSession(&h.Controller) + if userID == nil { + utils.ErrorResponse(&h.Controller, http.StatusUnauthorized, "Not authenticated", errors.New("not authenticated")) + return + } - var req models.CreateDestinationRequest - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") + projectID, err := GetProjectIDFromPath(&h.Controller) + if err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Convert request to Destination model - destination := &models.Destination{ - Name: req.Name, - DestType: req.Type, - Version: req.Version, - Config: req.Config, - ProjectID: projectIDStr, + + var req dto.CreateDestinationRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return } - // Set created by if user is logged in - userID := c.GetSession(constants.SessionUserID) - if userID != nil { - user := &models.User{ID: userID.(int)} - destination.CreatedBy = user - destination.UpdatedBy = user + if err := dto.ValidateDestinationType(req.Type); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return } - if err := c.destORM.Create(destination); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to create destination: %s", err)) + logger.Debugf("Create destination initiated project_id[%s] destination_type[%s] destination_name[%s] user_id[%v]", + projectID, req.Type, req.Name, userID) + + if err := h.etl.CreateDestination(h.Ctx.Request.Context(), &req, projectID, userID); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to create destination: %s", err), err) return } - // Track destination creation event - telemetry.TrackDestinationCreation(context.Background(), destination) - utils.SuccessResponse(&c.Controller, req) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("destination %s created successfully", req.Name), req) } // @router /project/:projectid/destinations/:id [put] -func (c *DestHandler) UpdateDestination() { - // Get destination ID from path - id := GetIDFromPath(&c.Controller) - projectID := c.Ctx.Input.Param(":projectid") - var req models.UpdateDestinationRequest - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") +func (h *Handler) UpdateDestination() { + userID := GetUserIDFromSession(&h.Controller) + if userID == nil { + utils.ErrorResponse(&h.Controller, http.StatusUnauthorized, "Not authenticated", errors.New("not authenticated")) return } - // Get existing destination - existingDest, err := c.destORM.GetByID(id) + + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Destination not found") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Update fields - existingDest.Name = req.Name - existingDest.DestType = req.Type - existingDest.Version = req.Version - existingDest.Config = req.Config - existingDest.UpdatedAt = time.Now() - - // Update user who made changes - userID := c.GetSession(constants.SessionUserID) - if 
userID != nil { - user := &models.User{ID: userID.(int)} - existingDest.UpdatedBy = user - } - - // Find jobs linked to this source - jobs, err := c.jobORM.GetByDestinationID(existingDest.ID) + projectID, err := GetProjectIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to fetch jobs for destination %s", err)) + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Cancel workflows for those jobs - for _, job := range jobs { - err := cancelJobWorkflow(c.tempClient, job, projectID) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to cancel workflow for job %s", err)) - return - } + var req dto.UpdateDestinationRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return } - // persist update - if err := c.destORM.Update(existingDest); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to update destination %s", err)) + if err := dto.ValidateDestinationType(req.Type); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Track destinations status after update - telemetry.TrackDestinationsStatus(context.Background()) + logger.Debugf("Update destination initiated project_id[%s], destination_id[%d], destination_type[%s], user_id[%v]", + projectID, id, req.Type, userID) - utils.SuccessResponse(&c.Controller, req) + if err := h.etl.UpdateDestination(h.Ctx.Request.Context(), id, projectID, &req, userID); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to update destination: %s", err), err) + return + } + utils.SuccessResponse(&h.Controller, fmt.Sprintf("destination %s updated successfully", req.Name), req) } // @router /project/:projectid/destinations/:id [delete] -func (c *DestHandler) DeleteDestination() { - // Get destination ID from path - id := GetIDFromPath(&c.Controller) - // Get the name for the response - dest, err := c.destORM.GetByID(id) +func (h *Handler) DeleteDestination() { + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Destination not found") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - jobs, err := c.jobORM.GetByDestinationID(id) + logger.Debugf("Delete destination initiated destination_id[%d]", id) + + resp, err := h.etl.DeleteDestination(h.Ctx.Request.Context(), id) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to get source by id") - } - for _, job := range jobs { - job.Active = false - if err := c.jobORM.Update(job); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to deactivate jobs using this destination") - return - } - } - if err := c.destORM.Delete(id); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to delete destination") + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to delete destination: %s", err), err) return } - // Track destinations status after deletion - 
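The rewritten handlers in this diff resolve path parameters through `GetIDFromPath` and `GetProjectIDFromPath`, which now return errors instead of writing a response and returning a zero value. One plausible shape for these helpers, inferred only from their call sites; the real implementations may differ.

```go
package handlers

import (
	"fmt"
	"strconv"

	"github.com/beego/beego/v2/server/web"
)

// GetIDFromPath parses the numeric :id segment; callers decide how to respond on error.
func GetIDFromPath(c *web.Controller) (int, error) {
	idStr := c.Ctx.Input.Param(":id")
	id, err := strconv.Atoi(idStr)
	if err != nil || id <= 0 {
		return 0, fmt.Errorf("invalid id %q in path", idStr)
	}
	return id, nil
}

// GetProjectIDFromPath returns the :projectid segment, rejecting empty values.
func GetProjectIDFromPath(c *web.Controller) (string, error) {
	projectID := c.Ctx.Input.Param(":projectid")
	if projectID == "" {
		return "", fmt.Errorf("project id missing from path")
	}
	return projectID, nil
}
```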
telemetry.TrackDestinationsStatus(context.Background()) - - utils.SuccessResponse(&c.Controller, &models.DeleteDestinationResponse{ - Name: dest.Name, - }) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("destination %s deleted successfully", resp.Name), resp) } // @router /project/:projectid/destinations/test [post] -func (c *DestHandler) TestConnection() { - var req models.DestinationTestConnectionRequest - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") - return - } - if req.Type == "" { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "valid destination type is required") +func (h *Handler) TestDestinationConnection() { + // need to remove sourceVersion from request + var req dto.DestinationTestConnectionRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - if req.Version == "" || req.Version == "latest" { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "valid destination version required") - return - } + logger.Infof("Test destination connection initiated destination_type[%s] destination_version[%s]", req.Type, req.Version) - // Determine driver and available tags - version := req.Version - driver := req.Source - if driver == "" { - var err error - _, driver, err = utils.GetDriverImageTags(c.Ctx.Request.Context(), "", true) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to get valid driver image tags: %s", err)) - return - } - } - - encryptedConfig, err := utils.Encrypt(req.Config) + result, logs, err := h.etl.TestDestinationConnection(h.Ctx.Request.Context(), &req) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to encrypt destination config: "+err.Error()) + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to verify driver credentials: %s", err), err) return } - result, err := c.tempClient.TestConnection(c.Ctx.Request.Context(), "destination", driver, version, encryptedConfig) - if result == nil { - result = map[string]interface{}{ - "message": err.Error(), - "status": "failed", - } - } - utils.SuccessResponse(&c.Controller, result) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("destination %s connection tested successfully", req.Type), dto.TestConnectionResponse{ + ConnectionResult: result, + Logs: logs, + }) } // @router /destinations/:id/jobs [get] -func (c *DestHandler) GetDestinationJobs() { - id := GetIDFromPath(&c.Controller) - // Check if destination exists - _, err := c.destORM.GetByID(id) +func (h *Handler) GetDestinationJobs() { + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Destination not found") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Create a job ORM and get jobs by destination ID - jobORM := database.NewJobORM() - jobs, err := jobORM.GetByDestinationID(id) + logger.Debugf("Get destination jobs initiated destination_id[%d]", id) + + jobs, err := h.etl.GetDestinationJobs(h.Ctx.Request.Context(), id) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to retrieve jobs") + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, 
fmt.Sprintf("failed to get jobs related to destination: %s", err), err) return } - - // Format as required by API contract - utils.SuccessResponse(&c.Controller, map[string]interface{}{ - "jobs": jobs, - }) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("destination %d jobs fetched successfully", id), map[string]interface{}{"jobs": jobs}) } // @router /project/:projectid/destinations/versions [get] -func (c *DestHandler) GetDestinationVersions() { - // Get destination type from query parameter - destType := c.GetString("type") - if destType == "" { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Destination type is required") +func (h *Handler) GetDestinationVersions() { + projectID, err := GetProjectIDFromPath(&h.Controller) + if err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // get available driver versions - versions, _, err := utils.GetDriverImageTags(c.Ctx.Request.Context(), "", true) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to fetch driver versions: %s", err)) + destType := h.GetString("type") + if err := dto.ValidateDestinationType(destType); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - utils.SuccessResponse(&c.Controller, map[string]interface{}{ - "version": versions, - }) -} - -// @router /project/:projectid/destinations/spec [post] -func (c *DestHandler) GetDestinationSpec() { - _ = c.Ctx.Input.Param(":projectid") + logger.Debugf("Get destination versions initiated project_id[%s] destination_type[%s]", projectID, destType) - var req models.SpecRequest - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") + versions, err := h.etl.GetDestinationVersions(h.Ctx.Request.Context(), destType) + if err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to get destination versions: %s", err), err) return } - var specOutput models.SpecOutput - var err error - // TODO: make destinationType consistent. Only use parquet and iceberg. 
- destinationType := "iceberg" - if req.Type == "s3" { - destinationType = "parquet" - } - version := req.Version + utils.SuccessResponse(&h.Controller, fmt.Sprintf("destination %s versions fetched successfully", destType), versions) +} - // Determine driver and available tags - _, driver, err := utils.GetDriverImageTags(c.Ctx.Request.Context(), "", true) +// @router /project/:projectid/destinations/spec [post] +func (h *Handler) GetDestinationSpec() { + projectID, err := GetProjectIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to get valid driver image tags: %s", err)) + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - if c.tempClient != nil { - specOutput, err = c.tempClient.FetchSpec( - c.Ctx.Request.Context(), - destinationType, - driver, - version, - ) + var req dto.SpecRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return } + + logger.Debugf("Get destination spec initiated project_id[%s] destination_type[%s] destination_version[%s]", + projectID, req.Type, req.Version) + + resp, err := h.etl.GetDestinationSpec(h.Ctx.Request.Context(), &req) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to get spec: %v", err)) + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to get destination spec: %s", err), err) return } - - utils.SuccessResponse(&c.Controller, models.SpecResponse{ - Version: req.Version, - Type: req.Type, - Spec: specOutput.Spec, - }) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("destination %s spec fetched successfully", req.Type), resp) } diff --git a/server/internal/handlers/frontend_handlers.go b/server/internal/handlers/frontend_handlers.go deleted file mode 100644 index 040cd75b..00000000 --- a/server/internal/handlers/frontend_handlers.go +++ /dev/null @@ -1,22 +0,0 @@ -package handlers - -import ( - "net/http" - "path/filepath" - - "github.com/beego/beego/v2/server/web" -) - -type FrontendHandler struct { - web.Controller -} - -func (c *FrontendHandler) Get() { - const indexPath = "/opt/frontend/dist/index.html" - - // Set Content-Type early - c.Ctx.Output.ContentType("text/html") - - // Use built-in file serving for efficiency and proper headers - http.ServeFile(c.Ctx.ResponseWriter, c.Ctx.Request, filepath.Clean(indexPath)) -} diff --git a/server/internal/handlers/handler.go b/server/internal/handlers/handler.go new file mode 100644 index 00000000..1719972a --- /dev/null +++ b/server/internal/handlers/handler.go @@ -0,0 +1,25 @@ +package handlers + +import ( + "github.com/beego/beego/v2/server/web" + services "github.com/datazip-inc/olake-ui/server/internal/services/etl" +) + +type Handler struct { + web.Controller + etl *services.ETLService +} + +// appService holds the singleton service instance injected at startup. +var etl *services.ETLService + +func NewHandler(s *services.ETLService) *Handler { + etl = s + return &Handler{etl: s} +} + +// Prepare runs before each action; Beego constructs a fresh controller per request, +// so we assign the shared AppService here to avoid nil dereferences. 
+func (h *Handler) Prepare() { + h.etl = etl +} diff --git a/server/internal/handlers/handlers_utils.go b/server/internal/handlers/handlers_utils.go deleted file mode 100644 index 9f80e997..00000000 --- a/server/internal/handlers/handlers_utils.go +++ /dev/null @@ -1,120 +0,0 @@ -package handlers - -import ( - "context" - "fmt" - "net/http" - "strconv" - "time" - - "github.com/beego/beego/v2/server/web" - "github.com/datazip/olake-frontend/server/internal/models" - "github.com/datazip/olake-frontend/server/internal/temporal" - "github.com/datazip/olake-frontend/server/utils" - "go.temporal.io/api/workflowservice/v1" -) - -// get id from path -func GetIDFromPath(c *web.Controller) int { - idStr := c.Ctx.Input.Param(":id") - id, err := strconv.Atoi(idStr) - if err != nil { - utils.ErrorResponse(c, http.StatusBadRequest, "Invalid id") - return 0 - } - return id -} - -// setUsernames sets the created and updated usernames if available -func setUsernames(createdBy, updatedBy *string, creator, updater *models.User) { - if creator != nil { - *createdBy = creator.Username - } - if updater != nil { - *updatedBy = updater.Username - } -} - -// buildJobDataItems creates job data items with workflow information -// Returns (jobItems, success). If success is false, an error occurred and the handler should return. -func buildJobDataItems(jobs []*models.Job, err error, projectIDStr, contextType string, tempClient *temporal.Client, controller *web.Controller) ([]models.JobDataItem, bool) { - jobItems := make([]models.JobDataItem, 0) - - if err != nil { - return jobItems, true // No jobs is OK, return empty slice - } - - for _, job := range jobs { - jobInfo := models.JobDataItem{ - Name: job.Name, - ID: job.ID, - Activate: job.Active, - } - - // Set source/destination info based on context - if contextType == "source" && job.DestID != nil { - jobInfo.DestinationName = job.DestID.Name - jobInfo.DestinationType = job.DestID.DestType - } else if contextType == "destination" && job.SourceID != nil { - jobInfo.SourceName = job.SourceID.Name - jobInfo.SourceType = job.SourceID.Type - } - - if !setJobWorkflowInfo(&jobInfo, job.ID, projectIDStr, tempClient, controller) { - return nil, false // Error occurred, signal failure - } - jobItems = append(jobItems, jobInfo) - } - - return jobItems, true -} - -// setJobWorkflowInfo fetches and sets workflow execution information for a job -// Returns false if an error occurred that should stop processing -func setJobWorkflowInfo(jobInfo *models.JobDataItem, jobID int, projectIDStr string, tempClient *temporal.Client, controller *web.Controller) bool { - query := fmt.Sprintf("WorkflowId between 'sync-%s-%d' and 'sync-%s-%d-~'", projectIDStr, jobID, projectIDStr, jobID) - - resp, err := tempClient.ListWorkflow(context.Background(), &workflowservice.ListWorkflowExecutionsRequest{ - Query: query, - PageSize: 1, - }) - - if err != nil { - utils.ErrorResponse(controller, http.StatusInternalServerError, fmt.Sprintf("failed to list workflows: %v", err)) - return false - } - - if len(resp.Executions) > 0 { - jobInfo.LastRunTime = resp.Executions[0].StartTime.AsTime().Format(time.RFC3339) - jobInfo.LastRunState = resp.Executions[0].Status.String() - } else { - jobInfo.LastRunTime = "" - jobInfo.LastRunState = "" - } - return true -} - -func cancelJobWorkflow(tempClient *temporal.Client, job *models.Job, projectID string) error { - query := fmt.Sprintf( - "WorkflowId BETWEEN 'sync-%s-%d' AND 'sync-%s-%d-~' AND ExecutionStatus = 'Running'", - projectID, job.ID, projectID, job.ID, - ) 
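The new handler.go above injects a single ETLService into every request-scoped controller copy: `NewHandler` stores the service in the package-level variable once, and `Prepare` reassigns it on each request because Beego constructs a fresh controller per request. Roughly how that wiring is expected to be used at startup; the route path, service construction, and constructor arguments are illustrative assumptions, not code from the repository.

```go
package main

import (
	"github.com/beego/beego/v2/server/web"

	"github.com/datazip-inc/olake-ui/server/internal/handlers"
	services "github.com/datazip-inc/olake-ui/server/internal/services/etl"
)

func main() {
	// The real code presumably builds the service with its dependencies;
	// a zero value is used here purely for illustration.
	etl := &services.ETLService{}

	h := handlers.NewHandler(etl)          // stores the shared service for Prepare()
	web.Router("/login", h, "post:Login")  // Beego clones the controller per request
	web.Run()
}
```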
- - resp, err := tempClient.ListWorkflow(context.Background(), &workflowservice.ListWorkflowExecutionsRequest{ - Query: query, - }) - if err != nil { - return fmt.Errorf("list workflows failed: %s", err) - } - if len(resp.Executions) == 0 { - return nil // no running workflows - } - - for _, wfExec := range resp.Executions { - if err := tempClient.CancelWorkflow(context.Background(), - wfExec.Execution.WorkflowId, wfExec.Execution.RunId); err != nil { - return fmt.Errorf("failed to cancel workflow[%s]: %s", wfExec.Execution.WorkflowId, err) - } - } - return nil -} diff --git a/server/internal/handlers/job.go b/server/internal/handlers/job.go index 66f5ecb7..1647750b 100644 --- a/server/internal/handlers/job.go +++ b/server/internal/handlers/job.go @@ -1,712 +1,329 @@ package handlers import ( - "context" - "crypto/sha256" "encoding/json" "fmt" "net/http" - "os" - "path/filepath" - "strconv" - "strings" - "time" - - "github.com/beego/beego/v2/core/logs" - "github.com/beego/beego/v2/server/web" - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/database" - "github.com/datazip/olake-frontend/server/internal/docker" - "github.com/datazip/olake-frontend/server/internal/models" - "github.com/datazip/olake-frontend/server/internal/telemetry" - "github.com/datazip/olake-frontend/server/internal/temporal" - "github.com/datazip/olake-frontend/server/utils" - "go.temporal.io/api/workflowservice/v1" -) - -type JobHandler struct { - web.Controller - jobORM *database.JobORM - sourceORM *database.SourceORM - destORM *database.DestinationORM - userORM *database.UserORM - tempClient *temporal.Client -} -// Prepare initializes the ORM instances -func (c *JobHandler) Prepare() { - c.jobORM = database.NewJobORM() - c.sourceORM = database.NewSourceORM() - c.destORM = database.NewDestinationORM() - c.userORM = database.NewUserORM() - var err error - c.tempClient, err = temporal.NewClient() - if err != nil { - logs.Error("Failed to create Temporal client: %v", err) - } -} + "github.com/datazip-inc/olake-ui/server/internal/models/dto" + "github.com/datazip-inc/olake-ui/server/utils" + "github.com/datazip-inc/olake-ui/server/utils/logger" +) // @router /project/:projectid/jobs [get] -func (c *JobHandler) GetAllJobs() { - projectIDStr := c.Ctx.Input.Param(":projectid") - // Get jobs with optional filtering - jobs, err := c.jobORM.GetAllByProjectID(projectIDStr) +func (h *Handler) ListJobs() { + projectID, err := GetProjectIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to retrieve jobs by project ID") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Transform to response format - jobResponses := make([]models.JobResponse, 0, len(jobs)) - for _, job := range jobs { - jobResp := models.JobResponse{ - ID: job.ID, - Name: job.Name, - StreamsConfig: job.StreamsConfig, - Frequency: job.Frequency, - CreatedAt: job.CreatedAt.Format(time.RFC3339), - UpdatedAt: job.UpdatedAt.Format(time.RFC3339), - Activate: job.Active, - } - - // Set source and destination details - if job.SourceID != nil { - jobResp.Source = models.JobSourceConfig{ - Name: job.SourceID.Name, - Type: job.SourceID.Type, - Config: job.SourceID.Config, - Version: job.SourceID.Version, - } - } - - if job.DestID != nil { - jobResp.Destination = models.JobDestinationConfig{ - Name: job.DestID.Name, - Type: job.DestID.DestType, - Config: 
job.DestID.Config, - Version: job.DestID.Version, - } - } + logger.Debugf("Get all jobs initiated project_id[%s]", projectID) - // Set user details - if job.CreatedBy != nil { - jobResp.CreatedBy = job.CreatedBy.Username - } - if job.UpdatedBy != nil { - jobResp.UpdatedBy = job.UpdatedBy.Username - } - - // Get workflow information if Temporal client is available - if c.tempClient != nil { - query := fmt.Sprintf("WorkflowId between 'sync-%s-%d' and 'sync-%s-%d-~'", projectIDStr, job.ID, projectIDStr, job.ID) - if resp, err := c.tempClient.ListWorkflow(context.Background(), &workflowservice.ListWorkflowExecutionsRequest{ - Query: query, - PageSize: 1, - }); err != nil { - logs.Error("Failed to list workflows: %v", err) - } else if len(resp.Executions) > 0 { - jobResp.LastRunTime = resp.Executions[0].StartTime.AsTime().Format(time.RFC3339) - jobResp.LastRunState = resp.Executions[0].Status.String() - } - } - - jobResponses = append(jobResponses, jobResp) + jobs, err := h.etl.ListJobs(h.Ctx.Request.Context(), projectID) + if err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to retrieve jobs by project ID: %s", err), err) + return } - - utils.SuccessResponse(&c.Controller, jobResponses) + utils.SuccessResponse(&h.Controller, "jobs listed successfully", jobs) } // @router /project/:projectid/jobs [post] -func (c *JobHandler) CreateJob() { - // Get project ID from path - projectIDStr := c.Ctx.Input.Param(":projectid") - // Parse request body - var req models.CreateJobRequest - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") - return - } - unique, err := c.jobORM.IsJobNameUnique(projectIDStr, req.Name) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to check job name uniqueness") - return - } - if !unique { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Job name already exists") - return - } - // Find or create source - source, err := c.getOrCreateSource(&req.Source, projectIDStr) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to process source: %s", err)) +func (h *Handler) CreateJob() { + userID := GetUserIDFromSession(&h.Controller) + if userID == nil { + utils.ErrorResponse(&h.Controller, http.StatusUnauthorized, "Not authenticated", fmt.Errorf("not authenticated")) return } - // Find or create destination - dest, err := c.getOrCreateDestination(&req.Destination, projectIDStr) + projectID, err := GetProjectIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to process destination: %s", err)) + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Create job model - job := &models.Job{ - Name: req.Name, - SourceID: source, - DestID: dest, - Active: true, - Frequency: req.Frequency, - StreamsConfig: req.StreamsConfig, - State: "{}", - ProjectID: projectIDStr, + var req dto.CreateJobRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return } - // Get user information from session - userID := c.GetSession(constants.SessionUserID) - if userID != nil { - user := &models.User{ID: userID.(int)} - job.CreatedBy = user - 
job.UpdatedBy = user + // Conditional validation + if req.Source.ID == nil { + if err := dto.ValidateSourceType(req.Source.Type); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return + } + if req.Source.Name == "" || req.Source.Version == "" || req.Source.Config == "" { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, "source name, version, and config are required when source id is not provided", err) + return + } } - if err := c.jobORM.Create(job); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to create job: %s", err)) - return + if req.Destination.ID == nil { + if err := dto.ValidateDestinationType(req.Destination.Type); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return + } + if req.Destination.Name == "" || req.Destination.Version == "" || req.Destination.Config == "" { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, "destination name, version, and config are required when destination id is not provided", err) + return + } } - // telemetry events - telemetry.TrackJobCreation(context.Background(), job) + logger.Debugf("Create job initiated project_id[%s] job_name[%s] user_id[%v]", projectID, req.Name, userID) - if c.tempClient != nil { - fmt.Println("Using Temporal workflow for sync job") - _, err = c.tempClient.ManageSync( - c.Ctx.Request.Context(), - job.ProjectID, - job.ID, - job.Frequency, - temporal.ActionCreate, - ) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Temporal workflow execution failed for create job schedule: %s", err)) - } + if err := h.etl.CreateJob(h.Ctx.Request.Context(), &req, projectID, userID); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to create job: %s", err), err) + return } - - utils.SuccessResponse(&c.Controller, req) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("job '%s' created successfully", req.Name), nil) } // @router /project/:projectid/jobs/:id [put] -func (c *JobHandler) UpdateJob() { - // Get project ID and job ID from path - projectIDStr := c.Ctx.Input.Param(":projectid") - id := GetIDFromPath(&c.Controller) - - // Parse request body - var req models.UpdateJobRequest - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") +func (h *Handler) UpdateJob() { + userID := GetUserIDFromSession(&h.Controller) + if userID == nil { + utils.ErrorResponse(&h.Controller, http.StatusUnauthorized, "Not authenticated", fmt.Errorf("not authenticated")) return } - // Get existing job - existingJob, err := c.jobORM.GetByID(id, true) + projectID, err := GetProjectIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Job not found") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Find or create source - source, err := c.getOrCreateSource(&req.Source, projectIDStr) + jobID, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to process source: %s", err)) + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Find or 
create destination - dest, err := c.getOrCreateDestination(&req.Destination, projectIDStr) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to process destination: %s", err)) + var req dto.UpdateJobRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Update fields - existingJob.Name = req.Name - existingJob.SourceID = source - existingJob.DestID = dest - existingJob.Active = req.Activate - existingJob.Frequency = req.Frequency - existingJob.StreamsConfig = req.StreamsConfig - existingJob.UpdatedAt = time.Now() - existingJob.ProjectID = projectIDStr - - // Update user information - userID := c.GetSession(constants.SessionUserID) - if userID != nil { - user := &models.User{ID: userID.(int)} - existingJob.UpdatedBy = user - } - - // cancel existing workflow - err = cancelJobWorkflow(c.tempClient, existingJob, projectIDStr) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to cancel workflow for job %s", err)) - return - } - // Update job in database - if err := c.jobORM.Update(existingJob); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to update job") - return + if req.Source.ID == nil { + if err := dto.ValidateSourceType(req.Source.Type); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return + } + if req.Source.Name == "" || req.Source.Version == "" || req.Source.Config == "" { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, "source name, version, and config are required when source id is not provided", err) + return + } } - - // Track sources and destinations status after job update - telemetry.TrackJobEntity(context.Background()) - - if c.tempClient != nil { - logs.Info("Using Temporal workflow for sync job") - _, err = c.tempClient.ManageSync( - c.Ctx.Request.Context(), - existingJob.ProjectID, - existingJob.ID, - existingJob.Frequency, - temporal.ActionUpdate, - ) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Temporal workflow execution failed for update job schedule: %s", err)) + if req.Destination.ID == nil { + if err := dto.ValidateDestinationType(req.Destination.Type); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return + } + if req.Destination.Name == "" || req.Destination.Version == "" || req.Destination.Config == "" { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, "destination name, version, and config are required when destination id is not provided", err) return } } - utils.SuccessResponse(&c.Controller, req) -} + logger.Debugf("Update job initiated project_id[%s] job_id[%d] job_name[%s] user_id[%v]", projectID, jobID, req.Name, userID) -// @router /project/:projectid/jobs/:id [delete] -func (c *JobHandler) DeleteJob() { - idStr := c.Ctx.Input.Param(":id") - id, err := strconv.Atoi(idStr) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid job ID") + if err := h.etl.UpdateJob(h.Ctx.Request.Context(), &req, projectID, jobID, userID); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to update job: %s", err), err) return } + 
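CreateJob and UpdateJob enforce the same rule for inline connectors: if no existing source or destination id is supplied, the request must carry a complete inline definition (type, name, version, config). A stand-alone restatement of that rule; `JobConnector` is a hypothetical stand-in for the dto fields, not the real type.

```go
package main

import (
	"errors"
	"fmt"
)

// JobConnector captures just the fields the validation above looks at.
type JobConnector struct {
	ID      *int
	Type    string
	Name    string
	Version string
	Config  string
}

// validateConnector accepts either a reference to an existing record (ID set)
// or a fully specified inline definition.
func validateConnector(c JobConnector) error {
	if c.ID != nil {
		return nil // referencing an existing record: nothing else required
	}
	if c.Type == "" || c.Name == "" || c.Version == "" || c.Config == "" {
		return errors.New("name, type, version, and config are required when id is not provided")
	}
	return nil
}

func main() {
	id := 7
	fmt.Println(validateConnector(JobConnector{ID: &id}))                                                       // <nil>
	fmt.Println(validateConnector(JobConnector{Type: "postgres", Name: "pg", Version: "v0.1.0", Config: "{}"})) // <nil>
	fmt.Println(validateConnector(JobConnector{Type: "postgres"}))                                              // error
}
```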
utils.SuccessResponse(&h.Controller, fmt.Sprintf("job '%s' updated successfully", req.Name), nil) +} - // Get job name for response - job, err := c.jobORM.GetByID(id, true) +// @router /project/:projectid/jobs/:id [delete] +func (h *Handler) DeleteJob() { + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Job not found") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // cancel existing workflow - err = cancelJobWorkflow(c.tempClient, job, job.ProjectID) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to cancel workflow for job %s", err)) - return - } - jobName := job.Name - if c.tempClient != nil { - logs.Info("Using Temporal workflow for delete job schedule") - _, err = c.tempClient.ManageSync( - c.Ctx.Request.Context(), - job.ProjectID, - job.ID, - job.Frequency, - temporal.ActionDelete, - ) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Temporal workflow execution failed for delete job schedule: %s", err)) - return - } - } - // Delete job - if err := c.jobORM.Delete(id); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to delete job") + logger.Infof("Delete job initiated job_id[%d]", id) + + jobName, err := h.etl.DeleteJob(h.Ctx.Request.Context(), id) + if err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to delete job: %s", err), err) return } - - // Track sources and destinations status after job deletion - telemetry.TrackJobEntity(context.Background()) - - utils.SuccessResponse(&c.Controller, models.DeleteDestinationResponse{ - Name: jobName, - }) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("job '%s' deleted successfully", jobName), nil) } // @router /project/:projectid/jobs/check-unique [post] -func (c *JobHandler) CheckUniqueJobName() { - projectIDStr := c.Ctx.Input.Param(":projectid") - var req models.CheckUniqueJobNameRequest - - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") +func (h *Handler) CheckUniqueJobName() { + projectID, err := GetProjectIDFromPath(&h.Controller) + if err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - if req.JobName == "" { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Job name is required") + + var req dto.CheckUniqueJobNameRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - unique, err := c.jobORM.IsJobNameUnique(projectIDStr, req.JobName) + + logger.Infof("Check unique job name initiated project_id[%s] job_name[%s]", projectID, req.JobName) + + unique, err := h.etl.CheckUniqueJobName(h.Ctx.Request.Context(), projectID, req) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to check job name uniqueness") + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to check job name uniqueness: %s", err), err) return } - utils.SuccessResponse(&c.Controller, models.CheckUniqueJobNameResponse{ - Unique: unique, - }) + utils.SuccessResponse(&h.Controller, 
fmt.Sprintf("job name '%s' uniqueness checked successfully", req.JobName), dto.CheckUniqueJobNameResponse{Unique: unique}) } // @router /project/:projectid/jobs/:id/sync [post] -func (c *JobHandler) SyncJob() { - idStr := c.Ctx.Input.Param(":id") - id, err := strconv.Atoi(idStr) +func (h *Handler) SyncJob() { + projectID, err := GetProjectIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid job ID") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Check if job exists - job, err := c.jobORM.GetByID(id, true) + + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Job not found") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Validate source and destination exist - if job.SourceID == nil || job.DestID == nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Job must have both source and destination configured") - return - } + logger.Infof("Sync trigger initiated for project_id[%s] job_id[%d]", projectID, id) - if c.tempClient != nil { - logs.Info("Using Temporal workflow for sync job") - _, err = c.tempClient.ManageSync( - c.Ctx.Request.Context(), - job.ProjectID, - job.ID, - job.Frequency, - temporal.ActionTrigger, - ) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Temporal workflow execution failed for sync job: %s", err)) - return - } + result, err := h.etl.SyncJob(h.Ctx.Request.Context(), projectID, id) + if err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to trigger sync: %s", err), err) + return } - utils.SuccessResponse(&c.Controller, nil) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("sync triggered successfully for job_id[%d]", id), result) } -// @router /project/:projectid/jobs/:id/activate [post] -func (c *JobHandler) ActivateJob() { - id := GetIDFromPath(&c.Controller) - - // Parse request body - var req models.JobStatus - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") +// @router /project/:projectid/jobs/:id/activate [put] +func (h *Handler) ActivateJob() { + userID := GetUserIDFromSession(&h.Controller) + if userID == nil { + utils.ErrorResponse(&h.Controller, http.StatusUnauthorized, "Not authenticated", fmt.Errorf("not authenticated")) return } - // Get existing job - job, err := c.jobORM.GetByID(id, true) + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Job not found") - return - } - action := temporal.ActionUnpause - if !req.Activate { - action = temporal.ActionPause - } - if c.tempClient != nil { - logs.Info("Using Temporal workflow for activate job schedule") - _, err = c.tempClient.ManageSync( - c.Ctx.Request.Context(), - job.ProjectID, - job.ID, - job.Frequency, - action, - ) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Temporal workflow execution failed for activate job schedule: %s", err)) - return - } + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return } - // Update activation status - job.Active = req.Activate - job.UpdatedAt = time.Now() - // Update user 
information - userID := c.GetSession(constants.SessionUserID) - if userID != nil { - user := &models.User{ID: userID.(int)} - job.UpdatedBy = user + var req dto.JobStatusRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return } - // Update job in database - if err := c.jobORM.Update(job); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to update job activation status") + logger.Debugf("Activate job initiated job_id[%d] user_id[%v]", id, userID) + + if err := h.etl.ActivateJob(h.Ctx.Request.Context(), id, req, userID); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to activate job: %s", err), err) return } - - utils.SuccessResponse(&c.Controller, req) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("job %d %s successfully", id, utils.Ternary(req.Activate, "resumed", "paused")), nil) } -// @router /project/:projectid/jobs/:id/cancel [get] -func (c *JobHandler) CancelJobRun() { - // Parse inputs - idStr := c.Ctx.Input.Param(":id") - id, err := strconv.Atoi(idStr) +// @router /project/:projectid/jobs/:id/cancel [post] +func (h *Handler) CancelJobRun() { + projectID, err := GetProjectIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid job ID") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - projectID := c.Ctx.Input.Param(":projectid") - // Ensure job exists - job, err := c.jobORM.GetByID(id, true) + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, fmt.Sprintf("Job not found: %v", err)) + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - if err := cancelJobWorkflow(c.tempClient, job, projectID); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("job workflow cancel failed: %v", err)) + logger.Infof("Cancel job run initiated project_id[%s] job_id[%d]", projectID, id) + + if err := h.etl.CancelJobRun(h.Ctx.Request.Context(), projectID, id); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to cancel job run: %s", err), err) return } - - utils.SuccessResponse(&c.Controller, map[string]any{ - "message": "Job Cancellation initiated. 
Completion may take up to a minute", - }) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("job workflow cancel requested successfully for job_id[%d]", id), nil) } // @router /project/:projectid/jobs/:id/tasks [get] -func (c *JobHandler) GetJobTasks() { - idStr := c.Ctx.Input.Param(":id") - id, err := strconv.Atoi(idStr) +func (h *Handler) GetJobTasks() { + projectID, err := GetProjectIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid job ID") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - projectIDStr := c.Ctx.Input.Param(":projectid") - // Get job to verify it exists - job, err := c.jobORM.GetByID(id, true) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Job not found") - return - } - var tasks []models.JobTask - // Construct a query for workflows related to this project and job - query := fmt.Sprintf("WorkflowId between 'sync-%s-%d' and 'sync-%s-%d-~'", projectIDStr, job.ID, projectIDStr, job.ID) - // List workflows using the direct query - resp, err := c.tempClient.ListWorkflow(context.Background(), &workflowservice.ListWorkflowExecutionsRequest{ - Query: query, - }) + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to list workflows: %v", err)) + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - for _, execution := range resp.Executions { - startTime := execution.StartTime.AsTime().UTC() - var runTime string - if execution.CloseTime != nil { - runTime = execution.CloseTime.AsTime().UTC().Sub(startTime).Round(time.Second).String() - } else { - runTime = time.Since(startTime).Round(time.Second).String() - } - tasks = append(tasks, models.JobTask{ - Runtime: runTime, - StartTime: startTime.Format(time.RFC3339), - Status: execution.Status.String(), - FilePath: execution.Execution.WorkflowId, - }) - } - utils.SuccessResponse(&c.Controller, tasks) -} + logger.Debugf("Get job tasks initiated project_id[%s] job_id[%d]", projectID, id) -// @router /project/:projectid/jobs/:id/tasks/:taskid/logs [post] -func (c *JobHandler) GetTaskLogs() { - idStr := c.Ctx.Input.Param(":id") - id, err := strconv.Atoi(idStr) + tasks, err := h.etl.GetJobTasks(h.Ctx.Request.Context(), projectID, id) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid job ID") - return - } - - // Parse request body - var req struct { - FilePath string `json:"file_path"` - } - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to get job tasks: %s", err), err) return } + utils.SuccessResponse(&h.Controller, fmt.Sprintf("job tasks listed successfully for job_id[%d]", id), tasks) +} - // Verify job exists - _, err = c.jobORM.GetByID(id, true) +// @router /project/:projectid/jobs/:id/logs [get] +func (h *Handler) GetTaskLogs() { + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Job not found") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - syncFolderName := fmt.Sprintf("%x", sha256.Sum256([]byte(req.FilePath))) - // Read the log 
file - // Get home directory - homeDir := docker.GetDefaultConfigDir() - mainSyncDir := filepath.Join(homeDir, syncFolderName) - if _, err := os.Stat(mainSyncDir); os.IsNotExist(err) { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, fmt.Sprintf("No sync directory found: %s", mainSyncDir)) + var req dto.JobTaskRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Look for log files in the logs directory - logsDir := filepath.Join(mainSyncDir, "logs") - if _, err := os.Stat(logsDir); os.IsNotExist(err) { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Logs directory not found") - return - } + logger.Debugf("Get task logs initiated job_id[%d] file_path[%s]", id, req.FilePath) - // Since there is only one sync folder in logs, we can get it directly - files, err := os.ReadDir(logsDir) - if err != nil || len(files) == 0 { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "No sync log directory found") - return - } - - // Use the first directory we find (since there's only one) - syncDir := filepath.Join(logsDir, files[0].Name()) - - // Define the log file path - logPath := filepath.Join(syncDir, "olake.log") - - logContent, err := os.ReadFile(logPath) + logs, err := h.etl.GetTaskLogs(h.Ctx.Request.Context(), id, req.FilePath) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to read log file : %s", logPath)) + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to get task logs: %s", err), err) return } - - // Parse log entries - var logs []map[string]interface{} - lines := strings.Split(string(logContent), "\n") - for _, line := range lines { - if line == "" { - continue - } - - var logEntry struct { - Level string `json:"level"` - Time time.Time `json:"time"` - Message string `json:"message"` - } - - if err := json.Unmarshal([]byte(line), &logEntry); err != nil { - continue - } - if logEntry.Level != "debug" { - logs = append(logs, map[string]interface{}{ - "level": logEntry.Level, - "time": logEntry.Time.UTC().Format(time.RFC3339), - "message": logEntry.Message, - }) - } - } - - utils.SuccessResponse(&c.Controller, logs) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("task logs retrieved successfully for job_id[%d]", id), logs) } -// Helper methods - -// getOrCreateSource finds or creates a source based on the provided config -func (c *JobHandler) getOrCreateSource(config *models.JobSourceConfig, projectIDStr string) (*models.Source, error) { - // Try to find an existing source matching the criteria - sources, err := c.sourceORM.GetByNameAndType(config.Name, config.Type, projectIDStr) - if err == nil && len(sources) > 0 { - // Update the existing source if found - source := sources[0] - source.Config = config.Config - source.Version = config.Version - - // Get user info for update - userID := c.GetSession(constants.SessionUserID) - if userID != nil { - user := &models.User{ID: userID.(int)} - source.UpdatedBy = user - } - - if err := c.sourceORM.Update(source); err != nil { - return nil, fmt.Errorf("failed to update source: %s", err) - } - - return source, nil - } - - // Create a new source if not found - source := &models.Source{ - Name: config.Name, - Type: config.Type, - Config: config.Config, - Version: config.Version, - ProjectID: projectIDStr, - } - - // Set user info - userID := 
c.GetSession(constants.SessionUserID) - if userID != nil { - user := &models.User{ID: userID.(int)} - source.CreatedBy = user - source.UpdatedBy = user - } - - if err := c.sourceORM.Create(source); err != nil { - return nil, fmt.Errorf("failed to create source: %s", err) - } - - telemetry.TrackSourceCreation(context.Background(), source) - - return source, nil -} - -// getOrCreateDestination finds or creates a destination based on the provided config -func (c *JobHandler) getOrCreateDestination(config *models.JobDestinationConfig, projectIDStr string) (*models.Destination, error) { - // Try to find an existing destination matching the criteria - destinations, err := c.destORM.GetByNameAndType(config.Name, config.Type, projectIDStr) - if err == nil && len(destinations) > 0 { - // Update the existing destination if found - dest := destinations[0] - dest.Config = config.Config - dest.Version = config.Version - - // Get user info for update - userID := c.GetSession(constants.SessionUserID) - if userID != nil { - user := &models.User{ID: userID.(int)} - dest.UpdatedBy = user - } - - if err := c.destORM.Update(dest); err != nil { - return nil, fmt.Errorf("failed to update destination: %s", err) - } - - return dest, nil +// @router /internal/worker/callback/sync-telemetry [post] +func (h *Handler) UpdateSyncTelemetry() { + var req struct { + JobID int `json:"job_id"` + WorkflowID string `json:"workflow_id"` + Event string `json:"event"` } - // Create a new destination if not found - dest := &models.Destination{ - Name: config.Name, - DestType: config.Type, - Config: config.Config, - Version: config.Version, - ProjectID: projectIDStr, + if err := json.Unmarshal(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, "Invalid request format", err) + return } - // Set user info - userID := c.GetSession(constants.SessionUserID) - if userID != nil { - user := &models.User{ID: userID.(int)} - dest.CreatedBy = user - dest.UpdatedBy = user + if req.JobID == 0 || req.WorkflowID == "" { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, "job_id and workflow_id are required", nil) + return } - if err := c.destORM.Create(dest); err != nil { - return nil, fmt.Errorf("failed to create destination: %s", err) + if err := h.etl.UpdateSyncTelemetry(h.Ctx.Request.Context(), req.JobID, req.WorkflowID, req.Event); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, "Failed to update sync telemetry", err) + return } - // Track destination creation event - telemetry.TrackDestinationCreation(context.Background(), dest) - return dest, nil + utils.SuccessResponse(&h.Controller, fmt.Sprintf("sync telemetry updated successfully for job_id[%d] workflow_id[%s] event[%s]", req.JobID, req.WorkflowID, req.Event), nil) } diff --git a/server/internal/handlers/auth_middleware.go b/server/internal/handlers/middleware/auth.go similarity index 71% rename from server/internal/handlers/auth_middleware.go rename to server/internal/handlers/middleware/auth.go index 49f830ce..9adaf8b6 100644 --- a/server/internal/handlers/auth_middleware.go +++ b/server/internal/handlers/middleware/auth.go @@ -1,11 +1,11 @@ -package handlers +package middleware import ( "github.com/beego/beego/v2/server/web" "github.com/beego/beego/v2/server/web/context" - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/models" + "github.com/datazip-inc/olake-ui/server/internal/constants" + 
"github.com/datazip-inc/olake-ui/server/internal/models/dto" ) // middleware only works if session is enabled @@ -14,7 +14,7 @@ func AuthMiddleware(ctx *context.Context) { if userID := ctx.Input.Session(constants.SessionUserID); userID == nil { // Send unauthorized response ctx.Output.SetStatus(401) - _ = ctx.Output.JSON(models.JSONResponse{ + _ = ctx.Output.JSON(dto.JSONResponse{ Message: "Unauthorized, try login again", Success: false, }, false, false) diff --git a/server/internal/handlers/source.go b/server/internal/handlers/source.go index 0824a018..c96a5510 100644 --- a/server/internal/handlers/source.go +++ b/server/internal/handlers/source.go @@ -1,345 +1,260 @@ package handlers import ( - "context" - "encoding/json" + "errors" "fmt" "net/http" - "time" - "github.com/beego/beego/v2/core/logs" - "github.com/beego/beego/v2/server/web" - - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/database" - "github.com/datazip/olake-frontend/server/internal/models" - "github.com/datazip/olake-frontend/server/internal/telemetry" - "github.com/datazip/olake-frontend/server/internal/temporal" - "github.com/datazip/olake-frontend/server/utils" + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/models/dto" + "github.com/datazip-inc/olake-ui/server/utils" + "github.com/datazip-inc/olake-ui/server/utils/logger" ) -type SourceHandler struct { - web.Controller - sourceORM *database.SourceORM - userORM *database.UserORM - jobORM *database.JobORM - tempClient *temporal.Client -} - -func (c *SourceHandler) Prepare() { - c.sourceORM = database.NewSourceORM() - c.userORM = database.NewUserORM() - c.jobORM = database.NewJobORM() - - // Initialize Temporal client - var err error - c.tempClient, err = temporal.NewClient() - if err != nil { - logs.Error("Failed to create Temporal client: %v", err) - } -} - // @router /project/:projectid/sources [get] -func (c *SourceHandler) GetAllSources() { - sources, err := c.sourceORM.GetAll() +func (h *Handler) ListSources() { + projectID, err := GetProjectIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to retrieve sources") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - projectIDStr := c.Ctx.Input.Param(":projectid") - sourceItems := make([]models.SourceDataItem, 0, len(sources)) - - for _, source := range sources { - item := models.SourceDataItem{ - ID: source.ID, - Name: source.Name, - Type: source.Type, - Version: source.Version, - Config: source.Config, - CreatedAt: source.CreatedAt.Format(time.RFC3339), - UpdatedAt: source.UpdatedAt.Format(time.RFC3339), - } - - setUsernames(&item.CreatedBy, &item.UpdatedBy, source.CreatedBy, source.UpdatedBy) + logger.Debugf("Get all sources initiated project_id[%s]", projectID) - jobs, err := c.jobORM.GetBySourceID(source.ID) - var success bool - item.Jobs, success = buildJobDataItems(jobs, err, projectIDStr, "source", c.tempClient, &c.Controller) - if !success { - return // Error occurred in buildJobDataItems - } - - sourceItems = append(sourceItems, item) + sources, err := h.etl.ListSources(h.Ctx.Request.Context(), projectID) + if err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to retrieve sources: %s", err), err) + return } - - utils.SuccessResponse(&c.Controller, sourceItems) + 
utils.SuccessResponse(&h.Controller, "sources listed successfully", sources) } // @router /project/:projectid/sources [post] -func (c *SourceHandler) CreateSource() { - var req models.CreateSourceRequest - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") +func (h *Handler) CreateSource() { + userID := GetUserIDFromSession(&h.Controller) + if userID == nil { + utils.ErrorResponse(&h.Controller, http.StatusUnauthorized, "Not authenticated", errors.New("not authenticated")) return } - // Convert request to Source model - source := &models.Source{ - Name: req.Name, - Type: req.Type, - Version: req.Version, - Config: req.Config, + projectID, err := GetProjectIDFromPath(&h.Controller) + if err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return } - // Get project ID if needed - source.ProjectID = c.Ctx.Input.Param(":projectid") - - // Set created by if user is logged in - userID := c.GetSession(constants.SessionUserID) - if userID != nil { - user, err := c.userORM.GetByID(userID.(int)) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to get user") - return - } - source.CreatedBy = user - source.UpdatedBy = user + var req dto.CreateSourceRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return } - if err := c.sourceORM.Create(source); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to create source: %s", err)) + + if err := dto.ValidateSourceType(req.Type); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Track source creation event - telemetry.TrackSourceCreation(context.Background(), source) + logger.Debugf("Create source initiated project_id[%s] source_type[%s] source_name[%s] user_id[%v]", + projectID, req.Type, req.Name, userID) + + if err := h.etl.CreateSource(h.Ctx.Request.Context(), &req, projectID, userID); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to create source: %s", err), err) + return + } - utils.SuccessResponse(&c.Controller, req) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("source %s created successfully", req.Name), req) } // @router /project/:projectid/sources/:id [put] -func (c *SourceHandler) UpdateSource() { - id := GetIDFromPath(&c.Controller) - projectID := c.Ctx.Input.Param(":projectid") - var req models.UpdateSourceRequest - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") +func (h *Handler) UpdateSource() { + userID := GetUserIDFromSession(&h.Controller) + if userID == nil { + utils.ErrorResponse(&h.Controller, http.StatusUnauthorized, "Not authenticated", errors.New("not authenticated")) return } - // Get existing source - existingSource, err := c.sourceORM.GetByID(id) + + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Source not found") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Update fields - existingSource.Name 
= req.Name - existingSource.Config = req.Config - existingSource.Type = req.Type - existingSource.Version = req.Version - existingSource.UpdatedAt = time.Now() - - userID := c.GetSession(constants.SessionUserID) - if userID != nil { - user := &models.User{ID: userID.(int)} - existingSource.UpdatedBy = user + projectID, err := GetProjectIDFromPath(&h.Controller) + if err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return } - // Find jobs linked to this source - jobs, err := c.jobORM.GetBySourceID(existingSource.ID) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to fetch jobs for source %s", err)) + var req dto.UpdateSourceRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Cancel workflows for those jobs - for _, job := range jobs { - err := cancelJobWorkflow(c.tempClient, job, projectID) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to cancel workflow for job %s", err)) - return - } + if err := dto.ValidateSourceType(req.Type); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return } - // Persist update - if err := c.sourceORM.Update(existingSource); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to update source %s", err)) + logger.Debugf("Update source initiated project_id[%s] source_id[%d] source_type[%s] user_id[%v]", + projectID, id, req.Type, userID) + + if err := h.etl.UpdateSource(h.Ctx.Request.Context(), projectID, id, &req, userID); err != nil { + status := http.StatusInternalServerError + if errors.Is(err, constants.ErrSourceNotFound) { + status = http.StatusNotFound + } + utils.ErrorResponse(&h.Controller, status, fmt.Sprintf("failed to update source: %s", err), err) return } - // Track sources status after update - telemetry.TrackSourcesStatus(context.Background()) - utils.SuccessResponse(&c.Controller, req) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("source %s updated successfully", req.Name), req) } // @router /project/:projectid/sources/:id [delete] -func (c *SourceHandler) DeleteSource() { - id := GetIDFromPath(&c.Controller) - source, err := c.sourceORM.GetByID(id) +func (h *Handler) DeleteSource() { + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Source not found") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Get all jobs using this source - jobs, err := c.jobORM.GetBySourceID(id) + logger.Debugf("Delete source initiated source_id[%d]", id) + + resp, err := h.etl.DeleteSource(h.Ctx.Request.Context(), id) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to get jobs for source") + if errors.Is(err, constants.ErrSourceNotFound) { + utils.ErrorResponse(&h.Controller, http.StatusNotFound, fmt.Sprintf("source not found: %s", err), err) + } else { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to delete source: %s", err), err) + } return } + utils.SuccessResponse(&h.Controller, fmt.Sprintf("source %s deleted successfully", resp.Name), 
resp) +} - // Deactivate all jobs using this source - for _, job := range jobs { - job.Active = false - if err := c.jobORM.Update(job); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to deactivate jobs using this source") - return - } +// @router /project/:projectid/sources/test [post] +func (h *Handler) TestSourceConnection() { + var req dto.SourceTestConnectionRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return } - // Delete the source - if err := c.sourceORM.Delete(id); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to delete source") + if err := dto.ValidateSourceType(req.Type); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - telemetry.TrackSourcesStatus(context.Background()) - utils.SuccessResponse(&c.Controller, &models.DeleteSourceResponse{ - Name: source.Name, - }) -} + logger.Infof("Test source connection initiated source_type[%s] source_version[%s]", req.Type, req.Version) -// @router /project/:projectid/sources/test [post] -func (c *SourceHandler) TestConnection() { - var req models.SourceTestConnectionRequest - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") - return - } - encryptedConfig, err := utils.Encrypt(req.Config) + result, logs, err := h.etl.TestSourceConnection(h.Ctx.Request.Context(), &req) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to encrypt config") + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to verify credentials: %s", err), err) return } - result, err := c.tempClient.TestConnection(context.Background(), "config", req.Type, req.Version, encryptedConfig) - if result == nil { - result = map[string]interface{}{ - "message": err.Error(), - "status": "failed", - } - } - utils.SuccessResponse(&c.Controller, result) + + utils.SuccessResponse(&h.Controller, fmt.Sprintf("source %s connection tested successfully", req.Type), dto.TestConnectionResponse{ + ConnectionResult: result, + Logs: logs, + }) } -// @router /sources/streams[post] -func (c *SourceHandler) GetSourceCatalog() { - var req models.StreamsRequest - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") +// @router /sources/streams [post] +func (h *Handler) GetSourceCatalog() { + var req dto.StreamsRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - oldStreams := "" - // Load job details if JobID is provided - if req.JobID >= 0 { - job, err := c.jobORM.GetByID(req.JobID, true) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Job not found") - return - } - oldStreams = job.StreamsConfig - } - encryptedConfig, err := utils.Encrypt(req.Config) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to encrypt config") + + if err := dto.ValidateSourceType(req.Type); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed 
to validate request: %s", err), err) return } - // Use Temporal client to get the catalog - var newStreams map[string]interface{} - if c.tempClient != nil { - newStreams, err = c.tempClient.GetCatalog( - c.Ctx.Request.Context(), - req.Type, - req.Version, - encryptedConfig, - oldStreams, - req.JobName, - ) - } + + logger.Debugf("Get source catalog initiated source_type[%s] source_version[%s] job_id[%d]", + req.Type, req.Version, req.JobID) + + catalog, err := h.etl.GetSourceCatalog(h.Ctx.Request.Context(), &req) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to get catalog: %v", err)) + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to get source streams: %s", err), err) return } - utils.SuccessResponse(&c.Controller, newStreams) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("source %s catalog fetched successfully", req.Type), catalog) } // @router /sources/:id/jobs [get] -func (c *SourceHandler) GetSourceJobs() { - id := GetIDFromPath(&c.Controller) - // Check if source exists - _, err := c.sourceORM.GetByID(id) +func (h *Handler) GetSourceJobs() { + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "Source not found") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Create a job ORM and get jobs by source ID - jobs, err := c.jobORM.GetBySourceID(id) + logger.Debugf("Get source jobs initiated source_id[%d]", id) + + jobs, err := h.etl.GetSourceJobs(h.Ctx.Request.Context(), id) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to get jobs by source ID") + if errors.Is(err, constants.ErrSourceNotFound) { + utils.ErrorResponse(&h.Controller, http.StatusNotFound, fmt.Sprintf("source not found: %s", err), err) + } else { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to get source jobs: %s", err), err) + } return } - // Format as required by API contract - utils.SuccessResponse(&c.Controller, map[string]interface{}{ - "jobs": jobs, - }) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("source %d jobs listed successfully", id), map[string]interface{}{"jobs": jobs}) } // @router /project/:projectid/sources/versions [get] -func (c *SourceHandler) GetSourceVersions() { - sourceType := c.GetString("type") - if sourceType == "" { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "source type is required") +func (h *Handler) GetSourceVersions() { + projectID, err := GetProjectIDFromPath(&h.Controller) + if err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - versions, _, err := utils.GetDriverImageTags(c.Ctx.Request.Context(), fmt.Sprintf("olakego/source-%s", sourceType), true) + sourceType := h.GetString("type") + logger.Debugf("Get source versions initiated project_id[%s] source_type[%s]", projectID, sourceType) + if sourceType == "" { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to get source versions: %s", err), err) + return + } + versions, err := h.etl.GetSourceVersions(h.Ctx.Request.Context(), sourceType) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to fetch driver versions: %s", err)) + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed 
to get source versions: %s", err), err) return } - - utils.SuccessResponse(&c.Controller, map[string]interface{}{ - "version": versions, - }) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("source %s versions fetched successfully", sourceType), versions) } -// @router /project/:projectid/sources/spec [get] -func (c *SourceHandler) GetProjectSourceSpec() { - _ = c.Ctx.Input.Param(":projectid") +// @router /project/:projectid/sources/spec [post] +func (h *Handler) GetSourceSpec() { + projectID, err := GetProjectIDFromPath(&h.Controller) + if err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return + } - var req models.SpecRequest - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") + var req dto.SpecRequest + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - var specOutput models.SpecOutput - var err error - - specOutput, err = c.tempClient.FetchSpec( - c.Ctx.Request.Context(), - "", - req.Type, - req.Version, - ) + + if err := dto.ValidateSourceType(req.Type); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return + } + + logger.Debugf("Get source spec initiated project_id[%s] source_type[%s] source_version[%s]", + projectID, req.Type, req.Version) + + resp, err := h.etl.GetSourceSpec(h.Ctx.Request.Context(), &req) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to get spec: %v", err)) + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to get source spec: %s", err), err) return } - utils.SuccessResponse(&c.Controller, models.SpecResponse{ - Version: req.Version, - Type: req.Type, - Spec: specOutput.Spec, - }) + utils.SuccessResponse(&h.Controller, fmt.Sprintf("source %s spec fetched successfully", req.Type), resp) } diff --git a/server/internal/handlers/ui.go b/server/internal/handlers/ui.go new file mode 100644 index 00000000..8eb2246b --- /dev/null +++ b/server/internal/handlers/ui.go @@ -0,0 +1,19 @@ +package handlers + +import ( + "net/http" + "path/filepath" + + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/spf13/viper" +) + +func (h *Handler) ServeFrontend() { + indexPath := viper.GetString(constants.FrontendIndexPath) + + // Set Content-Type early + h.Ctx.Output.ContentType("text/html") + + // Use built-in file serving for efficiency and proper headers + http.ServeFile(h.Ctx.ResponseWriter, h.Ctx.Request, filepath.Clean(indexPath)) +} diff --git a/server/internal/handlers/user.go b/server/internal/handlers/user.go index b56401da..be9ade83 100644 --- a/server/internal/handlers/user.go +++ b/server/internal/handlers/user.go @@ -1,103 +1,90 @@ package handlers import ( - "encoding/json" + "errors" "fmt" "net/http" - "strconv" - "time" - "github.com/beego/beego/v2/server/web" - - "github.com/datazip/olake-frontend/server/internal/database" - "github.com/datazip/olake-frontend/server/internal/models" - "github.com/datazip/olake-frontend/server/utils" + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/utils" + "github.com/datazip-inc/olake-ui/server/utils/logger" ) -type UserHandler struct { - 
web.Controller - userORM *database.UserORM -} - -func (c *UserHandler) Prepare() { - c.userORM = database.NewUserORM() -} - // @router /users [post] -func (c *UserHandler) CreateUser() { +func (h *Handler) CreateUser() { var req models.User - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) + return + } + + if req.Username == "" || req.Email == "" || req.Password == "" { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", errors.New("missing required user fields")), errors.New("missing required user fields")) return } - if err := c.userORM.Create(&req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, fmt.Sprintf("Failed to create user: %s", err)) + logger.Infof("Create user initiated username[%s] email[%s]", req.Username, req.Email) + + if err := h.etl.CreateUser(h.Ctx.Request.Context(), &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to create user: %s", err), err) return } - utils.SuccessResponse(&c.Controller, req) + utils.SuccessResponse(&h.Controller, "user created successfully", req) } // @router /users [get] -func (c *UserHandler) GetAllUsers() { - users, err := c.userORM.GetAll() +func (h *Handler) GetAllUsers() { + logger.Info("Get all users initiated") + + users, err := h.etl.GetAllUsers(h.Ctx.Request.Context()) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to retrieve users") + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to get users: %s", err), err) return } - utils.SuccessResponse(&c.Controller, users) + utils.SuccessResponse(&h.Controller, "users listed successfully", users) } // @router /users/:id [put] -func (c *UserHandler) UpdateUser() { - idStr := c.Ctx.Input.Param(":id") - id, err := strconv.Atoi(idStr) +func (h *Handler) UpdateUser() { + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid user ID") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } var req models.User - if err := json.Unmarshal(c.Ctx.Input.RequestBody, &req); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid request format") - return - } - - // Get existing user - existingUser, err := c.userORM.GetByID(id) - if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusNotFound, "User not found") + if err := UnmarshalAndValidate(h.Ctx.Input.RequestBody, &req); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - // Update fields - existingUser.Username = req.Username - existingUser.Email = req.Email - existingUser.UpdatedAt = time.Now() + logger.Infof("Update user initiated user_id[%d] username[%s]", id, req.Username) - if err := c.userORM.Update(existingUser); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to update user") + updatedUser, err := h.etl.UpdateUser(h.Ctx.Request.Context(), id, &req) + if err != nil { + utils.ErrorResponse(&h.Controller, 
http.StatusInternalServerError, fmt.Sprintf("failed to update user: %s", err), err) return } - utils.SuccessResponse(&c.Controller, existingUser) + utils.SuccessResponse(&h.Controller, "user updated successfully", updatedUser) } // @router /users/:id [delete] -func (c *UserHandler) DeleteUser() { - idStr := c.Ctx.Input.Param(":id") - id, err := strconv.Atoi(idStr) +func (h *Handler) DeleteUser() { + id, err := GetIDFromPath(&h.Controller) if err != nil { - utils.ErrorResponse(&c.Controller, http.StatusBadRequest, "Invalid user ID") + utils.ErrorResponse(&h.Controller, http.StatusBadRequest, fmt.Sprintf("failed to validate request: %s", err), err) return } - if err := c.userORM.Delete(id); err != nil { - utils.ErrorResponse(&c.Controller, http.StatusInternalServerError, "Failed to delete user") + logger.Infof("Delete user initiated user_id[%d]", id) + + if err := h.etl.DeleteUser(h.Ctx.Request.Context(), id); err != nil { + utils.ErrorResponse(&h.Controller, http.StatusInternalServerError, fmt.Sprintf("failed to delete user: %s", err), err) return } - c.Ctx.ResponseWriter.WriteHeader(http.StatusNoContent) + utils.SuccessResponse(&h.Controller, "user deleted successfully", nil) } diff --git a/server/internal/handlers/utils.go b/server/internal/handlers/utils.go new file mode 100644 index 00000000..ea4fdb46 --- /dev/null +++ b/server/internal/handlers/utils.go @@ -0,0 +1,49 @@ +package handlers + +import ( + "encoding/json" + "fmt" + "strconv" + + "github.com/beego/beego/v2/server/web" + + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/models/dto" +) + +// get id from path +func GetIDFromPath(c *web.Controller) (int, error) { + idStr := c.Ctx.Input.Param(":id") + id, err := strconv.Atoi(idStr) + if err != nil { + return 0, fmt.Errorf("invalid id: %s", err) + } + return id, nil +} + +// get id from path +func GetProjectIDFromPath(c *web.Controller) (string, error) { + projectID := c.Ctx.Input.Param(":projectid") + if projectID == "" { + return "", fmt.Errorf("project id is required") + } + return projectID, nil +} + +// Helper to extract user ID from session +func GetUserIDFromSession(c *web.Controller) *int { + if sessionUserID := c.GetSession(constants.SessionUserID); sessionUserID != nil { + if uid, ok := sessionUserID.(int); ok { + return &uid + } + } + return nil +} + +// UnmarshalAndValidate unmarshals JSON from request body into the provided struct +func UnmarshalAndValidate(requestBody []byte, target interface{}) error { + if err := json.Unmarshal(requestBody, target); err != nil { + return err + } + return dto.Validate(target) +} diff --git a/server/internal/logger/logger.go b/server/internal/logger/logger.go deleted file mode 100644 index b45fb39a..00000000 --- a/server/internal/logger/logger.go +++ /dev/null @@ -1,56 +0,0 @@ -package logger - -import ( - "os" - "path" - "sync" - - "github.com/beego/beego/v2/core/logs" -) - -var ( - loggerInitOnce sync.Once -) - -func InitLogger(logdir string) { - loggerInitOnce.Do(func() { - // Clear existing loggers first - logs.Reset() - - // Create logs directory - if err := os.MkdirAll(logdir, 0755); err != nil { - panic("Failed to create log directory: " + err.Error()) - } - - // Console configuration - consoleConfig := `{ - "level": 7, - "color": true - }` - - // File configuration - fileConfig := `{ - "filename": "` + path.Join(logdir, "olake-server.log") + `", - "level": 7, - "maxlines": 1000, - "maxdays": 7, - "daily": false, - "rotate": true, - "perm": "0644" - }` - - // 
Initialize loggers - if err := logs.SetLogger(logs.AdapterConsole, consoleConfig); err != nil { - panic("Console logger init failed: " + err.Error()) - } - - if err := logs.SetLogger(logs.AdapterFile, fileConfig); err != nil { - panic("File logger init failed: " + err.Error()) - } - - // Configure logger behavior - logs.SetLogFuncCallDepth(3) - logs.EnableFuncCallDepth(true) - logs.SetLevel(logs.LevelDebug) - }) -} diff --git a/server/internal/models/db.go b/server/internal/models/db.go index 65aee7ed..67642bfe 100644 --- a/server/internal/models/db.go +++ b/server/internal/models/db.go @@ -3,7 +3,7 @@ package models import ( "time" - "github.com/datazip/olake-frontend/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/constants" ) // BaseModel with common fields diff --git a/server/internal/models/dto/requests.go b/server/internal/models/dto/requests.go new file mode 100644 index 00000000..ade7571a --- /dev/null +++ b/server/internal/models/dto/requests.go @@ -0,0 +1,98 @@ +package dto + +// Common fields for source/destination config +// source and destination are driver in olake cli +type DriverConfig struct { + ID *int `json:"id,omitempty"` + Name string `json:"name"` + Type string `json:"type"` + Version string `json:"version"` + Source string `json:"source_type"` + Config string `json:"config" orm:"type(jsonb)"` +} + +type LoginRequest struct { + Username string `json:"username" validate:"required"` + Password string `json:"password" validate:"required"` +} + +type SpecRequest struct { + Type string `json:"type" validate:"required"` + Version string `json:"version" validate:"required"` +} + +// check unique job name request +type CheckUniqueJobNameRequest struct { + JobName string `json:"job_name"` +} + +// Test connection requests +type SourceTestConnectionRequest struct { + Type string `json:"type" validate:"required"` + Version string `json:"version" validate:"required"` + Config string `json:"config" orm:"type(jsonb)" validate:"required"` +} +type StreamsRequest struct { + Name string `json:"name" validate:"required"` + Type string `json:"type" validate:"required"` + Version string `json:"version" validate:"required"` + Config string `json:"config" orm:"type(jsonb)" validate:"required"` + JobID int `json:"job_id" validate:"required"` + JobName string `json:"job_name" validate:"required"` +} + +// TODO: frontend needs to send only version no need for source version +type DestinationTestConnectionRequest struct { + Type string `json:"type" validate:"required"` + Version string `json:"version" validate:"required"` + Config string `json:"config" validate:"required"` + SourceType string `json:"source_type"` + SourceVersion string `json:"source_version"` +} + +type CreateSourceRequest struct { + Name string `json:"name" validate:"required"` + Type string `json:"type" validate:"required"` + Version string `json:"version" validate:"required"` + Config string `json:"config" orm:"type(jsonb)" validate:"required"` +} + +type UpdateSourceRequest struct { + Name string `json:"name" validate:"required"` + Type string `json:"type" validate:"required"` + Version string `json:"version" validate:"required"` + Config string `json:"config" orm:"type(jsonb)" validate:"required"` +} + +type CreateDestinationRequest struct { + Name string `json:"name" validate:"required"` + Type string `json:"type" validate:"required"` + Version string `json:"version" validate:"required"` + Config string `json:"config" orm:"type(jsonb)" validate:"required"` +} + +type UpdateDestinationRequest 
struct { + Name string `json:"name" validate:"required"` + Type string `json:"type" validate:"required"` + Version string `json:"version" validate:"required"` + Config string `json:"config" orm:"type(jsonb)" validate:"required"` +} + +type CreateJobRequest struct { + Name string `json:"name" validate:"required"` + Source *DriverConfig `json:"source" validate:"required"` + Destination *DriverConfig `json:"destination" validate:"required"` + Frequency string `json:"frequency" validate:"required"` + StreamsConfig string `json:"streams_config" orm:"type(jsonb)" validate:"required"` + Activate bool `json:"activate,omitempty"` +} + +type UpdateJobRequest = CreateJobRequest + +type JobTaskRequest struct { + FilePath string `json:"file_path" validate:"required"` +} + +type JobStatusRequest struct { + Activate bool `json:"activate"` +} diff --git a/server/internal/models/response.go b/server/internal/models/dto/response.go similarity index 68% rename from server/internal/models/response.go rename to server/internal/models/dto/response.go index 9aaf18ed..54331bb8 100644 --- a/server/internal/models/response.go +++ b/server/internal/models/dto/response.go @@ -1,9 +1,4 @@ -package models - -type LoginResponse struct { - Message string `json:"message"` - Success bool `json:"success"` -} +package dto type JSONResponse struct { Success bool `json:"success"` @@ -20,13 +15,6 @@ type SpecOutput struct { Spec map[string]interface{} `json:"spec"` } -// Reuse generic API response with generics -type APIResponse[T any] struct { - Success bool `json:"success"` - Message string `json:"message"` - Data T `json:"data"` -} - type DeleteSourceResponse struct { Name string `json:"name"` } @@ -44,21 +32,27 @@ type CheckUniqueJobNameResponse struct { Unique bool `json:"unique"` } +// TestConnectionResponse +type TestConnectionResponse struct { + ConnectionResult map[string]interface{} `json:"connection_result"` + Logs []map[string]interface{} `json:"logs"` +} + // Job response type JobResponse struct { - ID int `json:"id"` - Name string `json:"name"` - Source JobSourceConfig `json:"source"` - Destination JobDestinationConfig `json:"destination"` - StreamsConfig string `json:"streams_config"` - Frequency string `json:"frequency"` - LastRunTime string `json:"last_run_time,omitempty"` - LastRunState string `json:"last_run_state,omitempty"` - CreatedAt string `json:"created_at"` - UpdatedAt string `json:"updated_at"` - Activate bool `json:"activate"` - CreatedBy string `json:"created_by,omitempty"` - UpdatedBy string `json:"updated_by,omitempty"` + ID int `json:"id"` + Name string `json:"name"` + Source DriverConfig `json:"source"` + Destination DriverConfig `json:"destination"` + StreamsConfig string `json:"streams_config"` + Frequency string `json:"frequency"` + LastRunTime string `json:"last_run_time,omitempty"` + LastRunState string `json:"last_run_state,omitempty"` + CreatedAt string `json:"created_at"` + UpdatedAt string `json:"updated_at"` + Activate bool `json:"activate"` + CreatedBy string `json:"created_by,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` } type JobTask struct { diff --git a/server/internal/models/dto/validate.go b/server/internal/models/dto/validate.go new file mode 100644 index 00000000..ce2a33eb --- /dev/null +++ b/server/internal/models/dto/validate.go @@ -0,0 +1,47 @@ +package dto + +import ( + "fmt" + + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/go-playground/validator/v10" +) + +// ValidateStruct validates any struct that has `validate` tags. 
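For context on the `validate` tags used throughout the request DTOs above, here is a minimal, standalone sketch of how go-playground/validator evaluates them; the helper below wraps this same `Struct` call and flattens the per-field errors into one message. The `loginExample` struct is hypothetical, standing in for types like `LoginRequest`.

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// loginExample is a hypothetical stand-in for the request DTOs in this PR,
// which mark mandatory fields with `validate:"required"`.
type loginExample struct {
	Username string `json:"username" validate:"required"`
	Password string `json:"password" validate:"required"`
}

func main() {
	v := validator.New()

	// Missing Password: Struct returns validator.ValidationErrors, one entry
	// per failed field/rule, which the Validate helper folds into one message.
	if err := v.Struct(loginExample{Username: "admin"}); err != nil {
		for _, fe := range err.(validator.ValidationErrors) {
			fmt.Printf("Field '%s' failed validation rule '%s'\n", fe.Field(), fe.Tag())
		}
	}

	// All required fields set: Struct returns nil.
	fmt.Println(v.Struct(loginExample{Username: "admin", Password: "secret"}))
}
```

Handlers surface these validation failures to clients as the 400 "failed to validate request" responses returned by UnmarshalAndValidate callers.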
+func Validate(s interface{}) error { + validate := validator.New() + err := validate.Struct(s) + if err != nil { + if _, ok := err.(*validator.InvalidValidationError); ok { + return fmt.Errorf("invalid validation: %s", err) + } + + // collect all validation errors into a single message + var errorMessages string + for _, err := range err.(validator.ValidationErrors) { + errorMessages += fmt.Sprintf("Field '%s' failed validation rule '%s'; ", err.Field(), err.Tag()) + } + return fmt.Errorf("validation failed: %s", errorMessages) + } + return nil +} + +// ValidateSourceType checks if the provided type is in the list of supported source types +func ValidateSourceType(t string) error { + for _, allowed := range constants.SupportedSourceTypes { + if t == allowed { + return nil + } + } + return fmt.Errorf("invalid source type '%s', supported sources are: %v", t, constants.SupportedSourceTypes) +} + +// ValidateDestinationType checks if the provided type is in the list of supported destination types +func ValidateDestinationType(t string) error { + for _, allowed := range constants.SupportedDestinationTypes { + if t == allowed { + return nil + } + } + return fmt.Errorf("invalid destination type '%s', supported destinations are: %v", t, constants.SupportedDestinationTypes) +} diff --git a/server/internal/models/requests.go b/server/internal/models/requests.go deleted file mode 100644 index 1563f7cc..00000000 --- a/server/internal/models/requests.go +++ /dev/null @@ -1,83 +0,0 @@ -package models - -// Common fields for source/destination config -type ConnectorConfig struct { - Name string `json:"name"` - Type string `json:"type"` - Version string `json:"version"` - Source string `json:"source_type"` - Config string `json:"config" orm:"type(jsonb)"` -} - -// LoginRequest represents the expected JSON structure for login requests -type LoginRequest struct { - Username string `json:"username"` - Password string `json:"password"` -} - -// Spec request for getting specs -type SpecRequest struct { - Type string `json:"type"` - Version string `json:"version"` - Catalog string `json:"catalog"` -} - -// check unique job name request -type CheckUniqueJobNameRequest struct { - JobName string `json:"job_name"` -} - -// Test connection requests -type SourceTestConnectionRequest struct { - ConnectorConfig - SourceID int `json:"source_id"` -} -type StreamsRequest struct { - ConnectorConfig - JobID int `json:"job_id"` - JobName string `json:"job_name"` -} - -type DestinationTestConnectionRequest struct { - ConnectorConfig -} - -// Create/Update source and destination requests -type CreateSourceRequest struct { - ConnectorConfig -} - -type UpdateSourceRequest struct { - ConnectorConfig -} - -type CreateDestinationRequest struct { - ConnectorConfig -} - -type UpdateDestinationRequest struct { - ConnectorConfig -} - -// Job source and destination configurations -type JobSourceConfig = ConnectorConfig -type JobDestinationConfig = ConnectorConfig - -// Create and update job requests -type CreateJobRequest struct { - Name string `json:"name"` - Source JobSourceConfig `json:"source"` - Destination JobDestinationConfig `json:"destination"` - Frequency string `json:"frequency"` - StreamsConfig string `json:"streams_config" orm:"type(jsonb)"` - Activate bool `json:"activate,omitempty"` -} - -type UpdateJobRequest struct { - Name string `json:"name"` - Source JobSourceConfig `json:"source"` - Destination JobDestinationConfig `json:"destination"` - Frequency string `json:"frequency"` - StreamsConfig string 
`json:"streams_config" orm:"type(jsonb)"` - Activate bool `json:"activate,omitempty"` -} diff --git a/server/internal/services/etl/auth.go b/server/internal/services/etl/auth.go new file mode 100644 index 00000000..b5fb1a83 --- /dev/null +++ b/server/internal/services/etl/auth.go @@ -0,0 +1,64 @@ +package services + +import ( + "context" + "fmt" + "strings" + + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/utils/telemetry" + "golang.org/x/crypto/bcrypt" +) + +// Auth-related methods on AppService + +func (s *ETLService) Login(ctx context.Context, username, password string) (*models.User, error) { + user, err := s.db.GetUserByUsername(username) + if err != nil { + if strings.Contains(err.Error(), "no row found") { + return nil, fmt.Errorf("user not found: %s", err) + } + return nil, fmt.Errorf("failed to get user: %s", err) + } + + if err := s.db.CompareUserPassword(user.Password, password); err != nil { + return nil, fmt.Errorf("invalid credentials: %s", err) + } + + telemetry.TrackUserLogin(ctx, user) + + return user, nil +} + +func (s *ETLService) Signup(_ context.Context, user *models.User) error { + hashedPassword, err := bcrypt.GenerateFromPassword([]byte(user.Password), bcrypt.DefaultCost) + if err != nil { + return fmt.Errorf("failed to hash password: %s", err) + } + user.Password = string(hashedPassword) + + if err := s.db.CreateUser(user); err != nil { + if strings.Contains(err.Error(), "duplicate") || strings.Contains(err.Error(), "unique") { + return fmt.Errorf("user already exists: %s", err) + } + return fmt.Errorf("failed to create user: %s", err) + } + + return nil +} + +func (s *ETLService) GetUserByID(userID int) (*models.User, error) { + user, err := s.db.GetUserByID(userID) + if err != nil { + return nil, fmt.Errorf("failed to find user: %s", err) + } + return user, nil +} + +func (s *ETLService) ValidateUser(userID int) error { + _, err := s.db.GetUserByID(userID) + if err != nil { + return fmt.Errorf("failed to validate user: %s", err) + } + return nil +} diff --git a/server/internal/services/etl/destination.go b/server/internal/services/etl/destination.go new file mode 100644 index 00000000..067171db --- /dev/null +++ b/server/internal/services/etl/destination.go @@ -0,0 +1,231 @@ +package services + +import ( + "context" + "fmt" + "path/filepath" + "time" + + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/internal/models/dto" + "github.com/datazip-inc/olake-ui/server/utils" + "github.com/datazip-inc/olake-ui/server/utils/telemetry" +) + +// Destination-related methods on AppService + +// ListDestinations returns all destinations for a project with lightweight job summaries. 
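A note on the query shape of the function that follows: it gathers the destination IDs, fetches their jobs with a single GetJobsByDestinationID call, and groups them in memory by `job.DestID.ID`, avoiding an N+1 query across destinations. A simplified, standalone sketch of that grouping step (the `job` struct here is a reduced stand-in for `models.Job`):

```go
package main

import "fmt"

// job is a reduced stand-in for models.Job; only the fields needed for
// grouping are shown.
type job struct {
	ID     int
	DestID int
}

// groupByDestination mirrors the jobsByDestID map built in ListDestinations:
// one batched fetch for all destination IDs, then an in-memory group-by.
func groupByDestination(jobs []job) map[int][]job {
	grouped := make(map[int][]job, len(jobs))
	for _, j := range jobs {
		grouped[j.DestID] = append(grouped[j.DestID], j)
	}
	return grouped
}

func main() {
	jobs := []job{{ID: 1, DestID: 10}, {ID: 2, DestID: 10}, {ID: 3, DestID: 20}}
	for destID, js := range groupByDestination(jobs) {
		fmt.Printf("destination %d has %d job(s)\n", destID, len(js))
	}
}
```

Grouping once up front keeps the per-destination loop below free of database calls.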
+func (s *ETLService) ListDestinations(_ context.Context, projectID string) ([]dto.DestinationDataItem, error) { + destinations, err := s.db.ListDestinationsByProjectID(projectID) + if err != nil { + return nil, fmt.Errorf("failed to list destinations: %s", err) + } + + destIDs := make([]int, 0, len(destinations)) + for _, dest := range destinations { + destIDs = append(destIDs, dest.ID) + } + + var allJobs []*models.Job + allJobs, err = s.db.GetJobsByDestinationID(destIDs) + if err != nil { + return nil, fmt.Errorf("failed to list jobs: %s", err) + } + + jobsByDestID := make(map[int][]*models.Job) + for _, job := range allJobs { + jobsByDestID[job.DestID.ID] = append(jobsByDestID[job.DestID.ID], job) + } + + destItems := make([]dto.DestinationDataItem, 0, len(destinations)) + for _, dest := range destinations { + entity := dto.DestinationDataItem{ + ID: dest.ID, + Name: dest.Name, + Type: dest.DestType, + Version: dest.Version, + Config: dest.Config, + CreatedAt: dest.CreatedAt.Format(time.RFC3339), + UpdatedAt: dest.UpdatedAt.Format(time.RFC3339), + } + setUsernames(&entity.CreatedBy, &entity.UpdatedBy, dest.CreatedBy, dest.UpdatedBy) + + jobs := jobsByDestID[dest.ID] + jobItems, err := buildJobDataItems(jobs, s.temporal, "destination") + if err != nil { + return nil, fmt.Errorf("failed to build job data items: %s", err) + } + entity.Jobs = jobItems + destItems = append(destItems, entity) + } + + return destItems, nil +} + +func (s *ETLService) CreateDestination(ctx context.Context, req *dto.CreateDestinationRequest, projectID string, userID *int) error { + destination := &models.Destination{ + Name: req.Name, + DestType: req.Type, + Version: req.Version, + Config: req.Config, + ProjectID: projectID, + } + user := &models.User{ID: *userID} + destination.CreatedBy = user + destination.UpdatedBy = user + + if err := s.db.CreateDestination(destination); err != nil { + return fmt.Errorf("failed to create destination: %s", err) + } + + telemetry.TrackDestinationCreation(ctx, destination) + return nil +} + +func (s *ETLService) UpdateDestination(ctx context.Context, id int, projectID string, req *dto.UpdateDestinationRequest, userID *int) error { + existingDest, err := s.db.GetDestinationByID(id) + if err != nil { + return fmt.Errorf("failed to get destination: %s", err) + } + + existingDest.Name = req.Name + existingDest.DestType = req.Type + existingDest.Version = req.Version + existingDest.Config = req.Config + + user := &models.User{ID: *userID} + existingDest.UpdatedBy = user + + jobs, err := s.db.GetJobsByDestinationID([]int{existingDest.ID}) + if err != nil { + return fmt.Errorf("failed to fetch jobs for destination update: %s", err) + } + + if err := cancelAllJobWorkflows(ctx, s.temporal, jobs, projectID); err != nil { + return fmt.Errorf("failed to cancel workflows for destination update: %s", err) + } + + if err := s.db.UpdateDestination(existingDest); err != nil { + return fmt.Errorf("failed to update destination: %s", err) + } + + telemetry.TrackDestinationsStatus(ctx) + return nil +} + +func (s *ETLService) DeleteDestination(ctx context.Context, id int) (*dto.DeleteDestinationResponse, error) { + dest, err := s.db.GetDestinationByID(id) + if err != nil { + return nil, fmt.Errorf("failed to find destination: %s", err) + } + + jobs, err := s.db.GetJobsByDestinationID([]int{id}) + if err != nil { + return nil, fmt.Errorf("failed to retrieve jobs for destination deletion: %s", err) + } + if len(jobs) > 0 { + return nil, fmt.Errorf("cannot delete destination '%s' id[%d] because it is 
used in %d jobs; please delete the associated jobs first", dest.Name, id, len(jobs)) + } + var jobIDs []int + for _, job := range jobs { + job.Active = false + jobIDs = append(jobIDs, job.ID) + } + + if err := s.db.DeactivateJobs(jobIDs); err != nil { + return nil, fmt.Errorf("failed to deactivate jobs for destination deletion: %s", err) + } + + if err := s.db.DeleteDestination(id); err != nil { + return nil, fmt.Errorf("failed to delete destination: %s", err) + } + + telemetry.TrackDestinationsStatus(ctx) + return &dto.DeleteDestinationResponse{Name: dest.Name}, nil +} + +func (s *ETLService) TestDestinationConnection(ctx context.Context, req *dto.DestinationTestConnectionRequest) (map[string]interface{}, []map[string]interface{}, error) { + version := req.Version + driver := req.SourceType + if driver == "" { + var err error + _, driver, err = utils.GetDriverImageTags(ctx, "", true) + if err != nil { + return nil, nil, fmt.Errorf("failed to get driver image tags: %s", err) + } + } + + encryptedConfig, err := utils.Encrypt(req.Config) + if err != nil { + return nil, nil, fmt.Errorf("failed to encrypt config for test connection: %s", err) + } + workflowID := fmt.Sprintf("test-connection-%s-%d", req.Type, time.Now().Unix()) + result, err := s.temporal.VerifyDriverCredentials(ctx, workflowID, "destination", driver, version, encryptedConfig) + // TODO: handle from frontend + if result == nil { + result = map[string]interface{}{ + "message": err.Error(), + "status": "failed", + } + } + + if err != nil { + return result, nil, fmt.Errorf("connection test failed: %s", err) + } + + homeDir := constants.DefaultConfigDir + mainLogDir := filepath.Join(homeDir, workflowID) + logs, err := utils.ReadLogs(mainLogDir) + if err != nil { + return result, nil, fmt.Errorf("failed to read logs destination_type[%s] destination_version[%s] error[%s]", + req.Type, req.Version, err) + } + + return result, logs, nil +} + +func (s *ETLService) GetDestinationJobs(_ context.Context, id int) ([]*models.Job, error) { + if _, err := s.db.GetDestinationByID(id); err != nil { + return nil, fmt.Errorf("failed to find destination: %s", err) + } + + jobs, err := s.db.GetJobsByDestinationID([]int{id}) + if err != nil { + return nil, fmt.Errorf("failed to get jobs by destination: %s", err) + } + + return jobs, nil +} + +func (s *ETLService) GetDestinationVersions(ctx context.Context, destType string) (map[string]interface{}, error) { + if destType == "" { + return nil, fmt.Errorf("destination type is required") + } + + versions, _, err := utils.GetDriverImageTags(ctx, "", true) + if err != nil { + return nil, fmt.Errorf("failed to get driver image tags: %s", err) + } + + return map[string]interface{}{"version": versions}, nil +} + +// TODO: cache spec in db for each version +func (s *ETLService) GetDestinationSpec(ctx context.Context, req *dto.SpecRequest) (dto.SpecResponse, error) { + _, driver, err := utils.GetDriverImageTags(ctx, "", true) + if err != nil { + return dto.SpecResponse{}, fmt.Errorf("failed to get driver image tags: %s", err) + } + + specOut, err := s.temporal.GetDriverSpecs(ctx, req.Type, driver, req.Version) + if err != nil { + return dto.SpecResponse{}, fmt.Errorf("failed to get spec: %s", err) + } + + return dto.SpecResponse{ + Version: req.Version, + Type: req.Type, + Spec: specOut.Spec, + }, nil +} diff --git a/server/internal/services/etl/job.go b/server/internal/services/etl/job.go new file mode 100644 index 00000000..9da89f6d --- /dev/null +++ b/server/internal/services/etl/job.go @@ -0,0 +1,399 @@ 
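TestDestinationConnection above ties the Temporal workflow ID to on-disk logs: the ID `test-connection-<type>-<unix-timestamp>` is reused as the folder name under constants.DefaultConfigDir from which utils.ReadLogs collects the check output. A small sketch of that naming convention; the config-dir value below is an assumption, the real constant lives in internal/constants:

```go
package main

import (
	"fmt"
	"path/filepath"
	"time"
)

// defaultConfigDir stands in for constants.DefaultConfigDir, whose value is not shown in this diff.
const defaultConfigDir = "/tmp/olake-config"

func main() {
	destType := "iceberg" // illustrative destination type

	// Same shape as the workflow ID built in TestDestinationConnection.
	workflowID := fmt.Sprintf("test-connection-%s-%d", destType, time.Now().Unix())

	// The worker is expected to write the check logs under a folder named after the workflow ID.
	logDir := filepath.Join(defaultConfigDir, workflowID)
	fmt.Println("workflow id:", workflowID)
	fmt.Println("log folder :", logDir)
}
```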
+package services + +import ( + "context" + "crypto/sha256" + "fmt" + "path/filepath" + "strings" + "time" + + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/internal/models/dto" + "github.com/datazip-inc/olake-ui/server/utils" + "github.com/datazip-inc/olake-ui/server/utils/logger" + "github.com/datazip-inc/olake-ui/server/utils/telemetry" + "go.temporal.io/api/workflowservice/v1" +) + +// Job-related methods on AppService + +func (s *ETLService) ListJobs(ctx context.Context, projectID string) ([]dto.JobResponse, error) { + jobs, err := s.db.ListJobsByProjectID(projectID) + if err != nil { + return nil, fmt.Errorf("failed to list jobs: %s", err) + } + + jobResponses := make([]dto.JobResponse, 0, len(jobs)) + for _, job := range jobs { + jobResp, err := s.buildJobResponse(ctx, job, projectID) + if err != nil { + return nil, fmt.Errorf("failed to build job response: %s", err) + } + jobResponses = append(jobResponses, jobResp) + } + + return jobResponses, nil +} + +func (s *ETLService) CreateJob(ctx context.Context, req *dto.CreateJobRequest, projectID string, userID *int) error { + unique, err := s.db.IsJobNameUniqueInProject(projectID, req.Name) + if err != nil { + return fmt.Errorf("failed to check job name uniqueness: %s", err) + } + if !unique { + return fmt.Errorf("job name '%s' is not unique", req.Name) + } + source, err := s.upsertSource(req.Source, projectID, userID) + if err != nil { + return fmt.Errorf("failed to process source: %s", err) + } + + dest, err := s.upsertDestination(req.Destination, projectID, userID) + if err != nil { + return fmt.Errorf("failed to process destination: %s", err) + } + + user := &models.User{ID: *userID} + job := &models.Job{ + Name: req.Name, + SourceID: source, + DestID: dest, + Active: true, + Frequency: req.Frequency, + StreamsConfig: req.StreamsConfig, + State: "{}", + ProjectID: projectID, + CreatedBy: user, + UpdatedBy: user, + } + if err := s.db.CreateJob(job); err != nil { + return fmt.Errorf("failed to create job: %s", err) + } + + defer func() { + if err != nil { + if err := s.db.DeleteJob(job.ID); err != nil { + logger.Errorf("failed to delete job: %s", err) + } + } + }() + + if err = s.temporal.CreateSchedule(ctx, job); err != nil { + return fmt.Errorf("failed to create temporal workflow: %s", err) + } + + telemetry.TrackJobCreation(ctx, &models.Job{Name: req.Name}) + return nil +} + +func (s *ETLService) UpdateJob(ctx context.Context, req *dto.UpdateJobRequest, projectID string, jobID int, userID *int) error { + existingJob, err := s.db.GetJobByID(jobID, true) + if err != nil { + return fmt.Errorf("failed to get job: %s", err) + } + + // Snapshot previous job state for compensation on schedule update failure + prevJob := *existingJob + + source, err := s.upsertSource(req.Source, projectID, userID) + if err != nil { + return fmt.Errorf("failed to process source for job update: %s", err) + } + + dest, err := s.upsertDestination(req.Destination, projectID, userID) + if err != nil { + return fmt.Errorf("failed to process destination for job update: %s", err) + } + + existingJob.Name = req.Name // TODO: job name cant be changed + existingJob.SourceID = source + existingJob.DestID = dest + existingJob.Active = req.Activate + existingJob.Frequency = req.Frequency + existingJob.StreamsConfig = req.StreamsConfig + existingJob.ProjectID = projectID + existingJob.UpdatedBy = &models.User{ID: *userID} + // cancel existing workflow + err 
= cancelAllJobWorkflows(ctx, s.temporal, []*models.Job{existingJob}, projectID) + if err != nil { + return fmt.Errorf("failed to cancel workflow for job %s", err) + } + if err := s.db.UpdateJob(existingJob); err != nil { + return fmt.Errorf("failed to update job: %s", err) + } + + err = s.temporal.UpdateSchedule(ctx, existingJob.Frequency, existingJob.ProjectID, existingJob.ID) + if err != nil { + // Compensation: restore previous DB state if schedule update fails + if rerr := s.db.UpdateJob(&prevJob); rerr != nil { + logger.Errorf("failed to restore job after schedule update error: %s", rerr) + } + return fmt.Errorf("failed to update temporal workflow: %s", err) + } + + telemetry.TrackJobEntity(ctx) + return nil +} + +func (s *ETLService) DeleteJob(ctx context.Context, jobID int) (string, error) { + job, err := s.db.GetJobByID(jobID, true) + if err != nil { + return "", fmt.Errorf("failed to find job: %s", err) + } + + if err = s.temporal.DeleteSchedule(ctx, job.ProjectID, job.ID); err != nil { + return "", fmt.Errorf("failed to delete temporal workflow: %s", err) + } + + if err := s.db.DeleteJob(jobID); err != nil { + return "", fmt.Errorf("failed to delete job: %s", err) + } + + telemetry.TrackJobEntity(ctx) + return job.Name, nil +} + +func (s *ETLService) SyncJob(ctx context.Context, projectID string, jobID int) (interface{}, error) { + if err := s.temporal.TriggerSchedule(ctx, projectID, jobID); err != nil { + return nil, fmt.Errorf("failed to trigger sync: %s", err) + } + + return map[string]any{ + "message": "sync triggered successfully", + }, nil +} + +func (s *ETLService) CancelJobRun(ctx context.Context, projectID string, jobID int) error { + job, err := s.db.GetJobByID(jobID, true) + if err != nil { + return fmt.Errorf("failed to find job: %s", err) + } + + jobSlice := []*models.Job{job} + if err := cancelAllJobWorkflows(ctx, s.temporal, jobSlice, projectID); err != nil { + return fmt.Errorf("failed to cancel job workflow: %s", err) + } + return nil +} + +func (s *ETLService) ActivateJob(ctx context.Context, jobID int, req dto.JobStatusRequest, userID *int) error { + job, err := s.db.GetJobByID(jobID, true) + if err != nil { + return fmt.Errorf("failed to find job: %s", err) + } + + if req.Activate == job.Active { + return nil + } + + if req.Activate { + if err := s.temporal.ResumeSchedule(ctx, job.ProjectID, job.ID); err != nil { + return fmt.Errorf("failed to unpause schedule: %s", err) + } + } else { + if err := s.temporal.PauseSchedule(ctx, job.ProjectID, job.ID); err != nil { + return fmt.Errorf("failed to pause schedule: %s", err) + } + } + + job.Active = req.Activate + user := &models.User{ID: *userID} + job.UpdatedBy = user + + if err := s.db.UpdateJob(job); err != nil { + return fmt.Errorf("failed to update job activation status: %s", err) + } + + return nil +} + +func (s *ETLService) CheckUniqueJobName(_ context.Context, projectID string, req dto.CheckUniqueJobNameRequest) (bool, error) { + unique, err := s.db.IsJobNameUniqueInProject(projectID, req.JobName) + if err != nil { + return false, fmt.Errorf("failed to check job name uniqueness: %s", err) + } + + return unique, nil +} + +func (s *ETLService) GetJobTasks(ctx context.Context, projectID string, jobID int) ([]dto.JobTask, error) { + job, err := s.db.GetJobByID(jobID, true) + if err != nil { + return nil, fmt.Errorf("failed to find job: %s", err) + } + + var tasks []dto.JobTask + query := fmt.Sprintf("WorkflowId between 'sync-%s-%d' and 'sync-%s-%d-~'", projectID, job.ID, projectID, job.ID) + + resp, err := 
s.temporal.ListWorkflow(ctx, &workflowservice.ListWorkflowExecutionsRequest{ + Query: query, + }) + if err != nil { + return nil, fmt.Errorf("failed to list workflows: %s", err) + } + + for _, execution := range resp.Executions { + startTime := execution.StartTime.AsTime().UTC() + var runTime string + if execution.CloseTime != nil { + runTime = execution.CloseTime.AsTime().UTC().Sub(startTime).Round(time.Second).String() + } else { + runTime = time.Since(startTime).Round(time.Second).String() + } + tasks = append(tasks, dto.JobTask{ + Runtime: runTime, + StartTime: startTime.Format(time.RFC3339), + Status: execution.Status.String(), + FilePath: execution.Execution.WorkflowId, + }) + } + + return tasks, nil +} + +func (s *ETLService) GetTaskLogs(_ context.Context, jobID int, filePath string) ([]map[string]interface{}, error) { + _, err := s.db.GetJobByID(jobID, true) + if err != nil { + return nil, fmt.Errorf("failed to find job: %s", err) + } + + syncFolderName := fmt.Sprintf("%x", sha256.Sum256([]byte(filePath))) + + // Get home directory + homeDir := constants.DefaultConfigDir + mainSyncDir := filepath.Join(homeDir, syncFolderName) + logs, err := utils.ReadLogs(mainSyncDir) + if err != nil { + return nil, fmt.Errorf("failed to read logs: %s", err) + } + // TODO: need to add activity logs as well with sync logs + return logs, nil +} + +// TODO: frontend needs to send source id and destination id +func (s *ETLService) buildJobResponse(ctx context.Context, job *models.Job, projectID string) (dto.JobResponse, error) { + jobResp := dto.JobResponse{ + ID: job.ID, + Name: job.Name, + StreamsConfig: job.StreamsConfig, + Frequency: job.Frequency, + CreatedAt: job.CreatedAt.Format(time.RFC3339), + UpdatedAt: job.UpdatedAt.Format(time.RFC3339), + Activate: job.Active, + } + + if job.SourceID != nil { + jobResp.Source = dto.DriverConfig{ + ID: &job.SourceID.ID, + Name: job.SourceID.Name, + Type: job.SourceID.Type, + Config: job.SourceID.Config, + Version: job.SourceID.Version, + } + } + + if job.DestID != nil { + jobResp.Destination = dto.DriverConfig{ + ID: &job.DestID.ID, + Name: job.DestID.Name, + Type: job.DestID.DestType, + Config: job.DestID.Config, + Version: job.DestID.Version, + } + } + + if job.CreatedBy != nil { + jobResp.CreatedBy = job.CreatedBy.Username + } + if job.UpdatedBy != nil { + jobResp.UpdatedBy = job.UpdatedBy.Username + } + + query := fmt.Sprintf("WorkflowId between 'sync-%s-%d' and 'sync-%s-%d-~'", projectID, job.ID, projectID, job.ID) + resp, err := s.temporal.ListWorkflow(ctx, &workflowservice.ListWorkflowExecutionsRequest{ + Query: query, + PageSize: 1, + }) + if err != nil { + return dto.JobResponse{}, fmt.Errorf("failed to list workflows: %s", err) + } + if len(resp.Executions) > 0 { + jobResp.LastRunTime = resp.Executions[0].StartTime.AsTime().Format(time.RFC3339) + jobResp.LastRunState = resp.Executions[0].Status.String() + } + + return jobResp, nil +} + +func (s *ETLService) upsertSource(config *dto.DriverConfig, projectID string, userID *int) (*models.Source, error) { + if config == nil { + return nil, fmt.Errorf("source config is required") + } + + // If ID provided, use that source as-is without modifying it. + if config.ID != nil { + return s.db.GetSourceByID(*config.ID) + } + + user := &models.User{ID: *userID} + // Otherwise, create a new source. 
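GetJobTasks and buildJobResponse above find a job's sync runs with a Temporal visibility query of the form `WorkflowId between 'sync-<project>-<job>' and 'sync-<project>-<job>-~'`. Because runs started from the schedule get IDs that extend the `sync-<project>-<job>` prefix, and `~` sorts after the other printable ASCII characters that appear in those IDs, the BETWEEN range behaves as a prefix match. A sketch of how that query string is assembled, with illustrative values:

```go
package main

import "fmt"

// syncRunQuery mirrors the visibility query built in GetJobTasks and buildJobResponse:
// the upper bound "sync-<project>-<job>-~" sorts after any run suffix Temporal appends,
// so the range captures every execution spawned from this job's schedule.
func syncRunQuery(projectID string, jobID int) string {
	return fmt.Sprintf("WorkflowId between 'sync-%s-%d' and 'sync-%s-%d-~'",
		projectID, jobID, projectID, jobID)
}

func main() {
	fmt.Println(syncRunQuery("olake", 42))
	// Prints: WorkflowId between 'sync-olake-42' and 'sync-olake-42-~'
}
```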
+ newSource := &models.Source{ + Name: config.Name, + Type: config.Type, + Config: config.Config, + Version: config.Version, + ProjectID: projectID, + CreatedBy: user, + UpdatedBy: user, + } + if err := s.db.CreateSource(newSource); err != nil { + return nil, fmt.Errorf("failed to create source: %s", err) + } + + return newSource, nil +} + +func (s *ETLService) upsertDestination(config *dto.DriverConfig, projectID string, userID *int) (*models.Destination, error) { + if config == nil { + return nil, fmt.Errorf("destination config is required") + } + + // If ID provided, use that destination as-is without modifying it. + if config.ID != nil { + return s.db.GetDestinationByID(*config.ID) + } + + user := &models.User{ID: *userID} + // Otherwise, create a new destination. + newDest := &models.Destination{ + Name: config.Name, + DestType: config.Type, + Config: config.Config, + Version: config.Version, + ProjectID: projectID, + CreatedBy: user, + UpdatedBy: user, + } + + if err := s.db.CreateDestination(newDest); err != nil { + return nil, fmt.Errorf("failed to create destination: %s", err) + } + + return newDest, nil +} + +// worker service +func (s *ETLService) UpdateSyncTelemetry(ctx context.Context, jobID int, workflowID, event string) error { + switch strings.ToLower(event) { + case "started": + telemetry.TrackSyncStart(ctx, jobID, workflowID) + case "completed": + telemetry.TrackSyncCompleted(jobID, workflowID) + case "failed": + telemetry.TrackSyncFailed(jobID, workflowID) + } + + return nil +} diff --git a/server/internal/services/etl/services.go b/server/internal/services/etl/services.go new file mode 100644 index 00000000..a54fb26d --- /dev/null +++ b/server/internal/services/etl/services.go @@ -0,0 +1,26 @@ +package services + +import ( + "github.com/datazip-inc/olake-ui/server/internal/database" + "github.com/datazip-inc/olake-ui/server/internal/services/temporal" +) + +// AppService is a unified service exposing all domain operations backed by shared deps. +type ETLService struct { + // single ORM facade using one Ormer + db *database.Database + temporal *temporal.Temporal +} + +// InitAppService constructs a unified AppService with singletons. +func InitAppService(db *database.Database) (*ETLService, error) { + client, err := temporal.NewClient() + if err != nil { + return nil, err + } + + return &ETLService{ + db: db, + temporal: client, + }, nil +} diff --git a/server/internal/services/etl/source.go b/server/internal/services/etl/source.go new file mode 100644 index 00000000..62a499ab --- /dev/null +++ b/server/internal/services/etl/source.go @@ -0,0 +1,249 @@ +package services + +import ( + "context" + "fmt" + "path/filepath" + "time" + + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/internal/models/dto" + "github.com/datazip-inc/olake-ui/server/utils" + "github.com/datazip-inc/olake-ui/server/utils/telemetry" +) + +// Source-related methods on AppService + +// GetAllSources returns all sources for a project with lightweight job summaries. 
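InitAppService in services.go above is the single construction point for the unified service: it dials Temporal (with retry, see temporal.NewClient) and bundles the shared database facade into one ETLService that every handler reuses. A sketch of how a caller might wire it up; how the *database.Database handle is obtained is an assumption and not shown in this diff:

```go
package main

import (
	"log"

	"github.com/datazip-inc/olake-ui/server/internal/database"
	services "github.com/datazip-inc/olake-ui/server/internal/services/etl"
)

func main() {
	// Obtained from the application's existing database bootstrap (left abstract here).
	var db *database.Database

	svc, err := services.InitAppService(db)
	if err != nil {
		// NewClient retries the Temporal dial with backoff before this error surfaces.
		log.Fatalf("failed to initialise ETL service: %s", err)
	}
	_ = svc // handlers hold on to svc and call ListJobs, CreateSource, TestDestinationConnection, etc.
}
```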
+func (s *ETLService) ListSources(_ context.Context, _ string) ([]dto.SourceDataItem, error) { + sources, err := s.db.ListSources() + if err != nil { + return nil, fmt.Errorf("failed to list sources: %s", err) + } + + sourceIDs := make([]int, 0, len(sources)) + for _, src := range sources { + sourceIDs = append(sourceIDs, src.ID) + } + + var allJobs []*models.Job + allJobs, err = s.db.GetJobsBySourceID(sourceIDs) + if err != nil { + return nil, fmt.Errorf("failed to list jobs: %s", err) + } + + jobsBySourceID := make(map[int][]*models.Job) + for _, job := range allJobs { + if job.SourceID != nil { + jobsBySourceID[job.SourceID.ID] = append(jobsBySourceID[job.SourceID.ID], job) + } + } + + items := make([]dto.SourceDataItem, 0, len(sources)) + for _, src := range sources { + item := dto.SourceDataItem{ + ID: src.ID, + Name: src.Name, + Type: src.Type, + Version: src.Version, + Config: src.Config, + CreatedAt: src.CreatedAt.Format(time.RFC3339), + UpdatedAt: src.UpdatedAt.Format(time.RFC3339), + } + setUsernames(&item.CreatedBy, &item.UpdatedBy, src.CreatedBy, src.UpdatedBy) + + jobs := jobsBySourceID[src.ID] + jobItems, err := buildJobDataItems(jobs, s.temporal, "source") + if err != nil { + return nil, fmt.Errorf("failed to build job data items: %s", err) + } + item.Jobs = jobItems + + items = append(items, item) + } + + return items, nil +} + +func (s *ETLService) CreateSource(ctx context.Context, req *dto.CreateSourceRequest, projectID string, userID *int) error { + src := &models.Source{ + Name: req.Name, + Type: req.Type, + Version: req.Version, + Config: req.Config, + ProjectID: projectID, + } + + user := &models.User{ID: *userID} + src.CreatedBy = user + src.UpdatedBy = user + + if err := s.db.CreateSource(src); err != nil { + return fmt.Errorf("failed to create source: %s", err) + } + + telemetry.TrackSourceCreation(ctx, src) + return nil +} + +func (s *ETLService) UpdateSource(ctx context.Context, projectID string, id int, req *dto.UpdateSourceRequest, userID *int) error { + existing, err := s.db.GetSourceByID(id) + if err != nil { + return fmt.Errorf("failed to get source: %s", err) + } + + existing.Name = req.Name + existing.Config = req.Config + existing.Type = req.Type + existing.Version = req.Version + + user := &models.User{ID: *userID} + existing.UpdatedBy = user + + jobs, err := s.db.GetJobsBySourceID([]int{existing.ID}) + if err != nil { + return fmt.Errorf("failed to fetch jobs for source update: %s", err) + } + + if err := cancelAllJobWorkflows(ctx, s.temporal, jobs, projectID); err != nil { + return fmt.Errorf("failed to cancel workflows for source update: %s", err) + } + + if err := s.db.UpdateSource(existing); err != nil { + return fmt.Errorf("failed to update source: %s", err) + } + + telemetry.TrackSourcesStatus(ctx) + return nil +} + +func (s *ETLService) DeleteSource(ctx context.Context, id int) (*dto.DeleteSourceResponse, error) { + src, err := s.db.GetSourceByID(id) + if err != nil { + return nil, fmt.Errorf("failed to find source: %s", err) + } + + jobs, err := s.db.GetJobsBySourceID([]int{id}) + if err != nil { + return nil, fmt.Errorf("failed to retrieve jobs for source deletion: %s", err) + } + if len(jobs) > 0 { + return nil, fmt.Errorf("cannot delete source '%s' id[%d] because it is used in %d jobs; please delete the associated jobs first", src.Name, id, len(jobs)) + } + jobIDs := make([]int, 0, len(jobs)) + for _, job := range jobs { + jobIDs = append(jobIDs, job.ID) + } + + if err := s.db.DeactivateJobs(jobIDs); err != nil { + return nil, 
fmt.Errorf("failed to update jobs for source deletion: %s", err) + } + + if err := s.db.DeleteSource(id); err != nil { + return nil, fmt.Errorf("failed to delete source: %s", err) + } + + telemetry.TrackSourcesStatus(ctx) + return &dto.DeleteSourceResponse{Name: src.Name}, nil +} + +func (s *ETLService) TestSourceConnection(ctx context.Context, req *dto.SourceTestConnectionRequest) (map[string]interface{}, []map[string]interface{}, error) { + if s.temporal == nil { + return nil, nil, fmt.Errorf("temporal client not available") + } + + encryptedConfig, err := utils.Encrypt(req.Config) + if err != nil { + return nil, nil, fmt.Errorf("failed to encrypt config for test connection: %s", err) + } + workflowID := fmt.Sprintf("test-connection-%s-%d", req.Type, time.Now().Unix()) + result, err := s.temporal.VerifyDriverCredentials(ctx, workflowID, "config", req.Type, req.Version, encryptedConfig) + // TODO: handle from frontend + if result == nil { + result = map[string]interface{}{ + "message": err.Error(), + "status": "failed", + } + } + + if err != nil { + return result, nil, fmt.Errorf("connection test failed: %s", err) + } + homeDir := constants.DefaultConfigDir + mainLogDir := filepath.Join(homeDir, workflowID) + logs, err := utils.ReadLogs(mainLogDir) + if err != nil { + return result, nil, fmt.Errorf("failed to read logs source_type[%s] source_version[%s]: %s", + req.Type, req.Version, err) + } + + return result, logs, nil +} + +func (s *ETLService) GetSourceCatalog(ctx context.Context, req *dto.StreamsRequest) (map[string]interface{}, error) { + oldStreams := "" + if req.JobID >= 0 { + job, err := s.db.GetJobByID(req.JobID, true) + if err != nil { + return nil, fmt.Errorf("failed to find job for catalog: %s", err) + } + oldStreams = job.StreamsConfig + } + + encryptedConfig, err := utils.Encrypt(req.Config) + if err != nil { + return nil, fmt.Errorf("failed to encrypt config for catalog: %s", err) + } + + newStreams, err := s.temporal.DiscoverStreams( + ctx, + req.Type, + req.Version, + encryptedConfig, + oldStreams, + req.JobName, + ) + if err != nil { + return nil, fmt.Errorf("failed to get catalog: %s", err) + } + + return newStreams, nil +} + +func (s *ETLService) GetSourceJobs(_ context.Context, id int) ([]*models.Job, error) { + if _, err := s.db.GetSourceByID(id); err != nil { + return nil, fmt.Errorf("failed to find source: %s", err) + } + + jobs, err := s.db.GetJobsBySourceID([]int{id}) + if err != nil { + return nil, fmt.Errorf("failed to get jobs by source: %s", err) + } + + return jobs, nil +} + +func (s *ETLService) GetSourceVersions(ctx context.Context, sourceType string) (map[string]interface{}, error) { + imageName := fmt.Sprintf("olakego/source-%s", sourceType) + versions, _, err := utils.GetDriverImageTags(ctx, imageName, true) + if err != nil { + return nil, fmt.Errorf("failed to get Docker versions: %s", err) + } + + return map[string]interface{}{"version": versions}, nil +} + +// TODO: cache spec in db for each version +func (s *ETLService) GetSourceSpec(ctx context.Context, req *dto.SpecRequest) (dto.SpecResponse, error) { + specOut, err := s.temporal.GetDriverSpecs(ctx, "", req.Type, req.Version) + if err != nil { + return dto.SpecResponse{}, fmt.Errorf("failed to get spec: %s", err) + } + + return dto.SpecResponse{ + Version: req.Version, + Type: req.Type, + Spec: specOut.Spec, + }, nil +} diff --git a/server/internal/services/etl/user.go b/server/internal/services/etl/user.go new file mode 100644 index 00000000..fa34008c --- /dev/null +++ 
b/server/internal/services/etl/user.go @@ -0,0 +1,51 @@ +package services + +import ( + "context" + "fmt" + + "github.com/datazip-inc/olake-ui/server/internal/models" +) + +// User-related methods on AppService + +func (s *ETLService) CreateUser(_ context.Context, req *models.User) error { + if err := s.db.CreateUser(req); err != nil { + return fmt.Errorf("failed to create user: %s", err) + } + + return nil +} + +func (s *ETLService) GetAllUsers(_ context.Context) ([]*models.User, error) { + users, err := s.db.ListUsers() + if err != nil { + return nil, fmt.Errorf("failed to list users: %s", err) + } + return users, nil +} + +func (s *ETLService) UpdateUser(_ context.Context, id int, req *models.User) (*models.User, error) { + existingUser, err := s.db.GetUserByID(id) + if err != nil { + return nil, fmt.Errorf("failed to find user: %s", err) + } + + existingUser.Username = req.Username + existingUser.Email = req.Email + + if err := s.db.UpdateUser(existingUser); err != nil { + return nil, fmt.Errorf("failed to update user: %s", err) + } + + return existingUser, nil +} + +func (s *ETLService) DeleteUser(_ context.Context, id int) error { + if err := s.db.DeleteUser(id); err != nil { + return fmt.Errorf("failed to delete user: %s", err) + } + return nil +} + +// removed: duplicate of auth.GetUserByID diff --git a/server/internal/services/etl/utils.go b/server/internal/services/etl/utils.go new file mode 100644 index 00000000..9229f541 --- /dev/null +++ b/server/internal/services/etl/utils.go @@ -0,0 +1,110 @@ +package services + +import ( + "context" + "fmt" + "strings" + "time" + + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/internal/models/dto" + "github.com/datazip-inc/olake-ui/server/internal/services/temporal" + "go.temporal.io/api/workflowservice/v1" +) + +func cancelAllJobWorkflows(ctx context.Context, tempClient *temporal.Temporal, jobs []*models.Job, projectID string) error { + if len(jobs) == 0 { + return nil + } + + // Build combined query + var conditions []string + for _, job := range jobs { + conditions = append(conditions, fmt.Sprintf( + "(WorkflowId BETWEEN 'sync-%s-%d' AND 'sync-%s-%d-~')", + projectID, job.ID, projectID, job.ID, + )) + } + + query := fmt.Sprintf("(%s) AND ExecutionStatus = 'Running'", strings.Join(conditions, " OR ")) + + // List all running workflows at once + resp, err := tempClient.ListWorkflow(ctx, &workflowservice.ListWorkflowExecutionsRequest{ + Query: query, + }) + if err != nil { + return fmt.Errorf("list workflows failed: %s", err) + } + if len(resp.Executions) == 0 { + return nil // no running workflows + } + + // Cancel each found workflow (still a loop, but only one list RPC) + for _, wfExec := range resp.Executions { + if err := tempClient.CancelWorkflow(ctx, + wfExec.Execution.WorkflowId, wfExec.Execution.RunId); err != nil { + return fmt.Errorf("failed to cancel workflow[%s]: %s", wfExec.Execution.WorkflowId, err) + } + } + return nil +} + +func buildJobDataItems(jobs []*models.Job, tempClient *temporal.Temporal, contextType string) ([]dto.JobDataItem, error) { + jobItems := make([]dto.JobDataItem, 0) + for _, job := range jobs { + jobInfo := dto.JobDataItem{ + Name: job.Name, + ID: job.ID, + Activate: job.Active, + } + + // Set source/destination info based on context + if contextType == "source" && job.DestID != nil { + jobInfo.DestinationName = job.DestID.Name + jobInfo.DestinationType = job.DestID.DestType + } else if contextType == "destination" && job.SourceID != nil { + 
jobInfo.SourceName = job.SourceID.Name + jobInfo.SourceType = job.SourceID.Type + } + + if err := setJobWorkflowInfo(&jobInfo, job.ID, job.ProjectID, tempClient); err != nil { + return nil, fmt.Errorf("failed to set job workflow info: %s", err) + } + jobItems = append(jobItems, jobInfo) + } + + return jobItems, nil +} + +func setUsernames(createdBy, updatedBy *string, createdByUser, updatedByUser *models.User) { + if createdByUser != nil { + *createdBy = createdByUser.Username + } + if updatedByUser != nil { + *updatedBy = updatedByUser.Username + } +} + +// setJobWorkflowInfo fetches and sets workflow execution information for a job +// Returns false if an error occurred that should stop processing +func setJobWorkflowInfo(jobInfo *dto.JobDataItem, jobID int, projectID string, tempClient *temporal.Temporal) error { + query := fmt.Sprintf("WorkflowId between 'sync-%s-%d' and 'sync-%s-%d-~'", projectID, jobID, projectID, jobID) + + resp, err := tempClient.ListWorkflow(context.Background(), &workflowservice.ListWorkflowExecutionsRequest{ + Query: query, + PageSize: 1, + }) + + if err != nil { + return fmt.Errorf("failed to list workflows: %s", err) + } + + if len(resp.Executions) > 0 { + jobInfo.LastRunTime = resp.Executions[0].StartTime.AsTime().Format(time.RFC3339) + jobInfo.LastRunState = resp.Executions[0].Status.String() + } else { + jobInfo.LastRunTime = "" + jobInfo.LastRunState = "" + } + return nil +} diff --git a/server/internal/services/temporal/client.go b/server/internal/services/temporal/client.go new file mode 100644 index 00000000..1d41de9f --- /dev/null +++ b/server/internal/services/temporal/client.go @@ -0,0 +1,144 @@ +package temporal + +import ( + "context" + "fmt" + "time" + + "github.com/beego/beego/v2/server/web" + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/utils" + "go.temporal.io/api/enums/v1" + "go.temporal.io/api/workflowservice/v1" + "go.temporal.io/sdk/client" +) + +type Temporal struct { + Client client.Client + taskQueue string +} + +// NewClient creates a new Temporal client +func NewClient() (*Temporal, error) { + temporalAddress, err := web.AppConfig.String(constants.ConfTemporalAddress) + if err != nil { + return nil, fmt.Errorf("failed to get temporal address: %s", err) + } + + var temporalClient *Temporal + err = utils.RetryWithBackoff(func() error { + client, dialErr := client.Dial(client.Options{ + HostPort: temporalAddress, + }) + if dialErr != nil { + return fmt.Errorf("failed to create temporal client: %s", dialErr) + } + + temporalClient = &Temporal{ + Client: client, + taskQueue: constants.TemporalTaskQueue, + } + return nil + }, 3, time.Second) + if err != nil { + return nil, err + } + + return temporalClient, nil +} + +// Close closes the Temporal client +func (t *Temporal) Close() { + if t.Client != nil { + t.Client.Close() + } +} + +func (t *Temporal) WorkflowAndScheduleID(projectID string, jobID int) (string, string) { + workflowID := fmt.Sprintf("sync-%s-%d", projectID, jobID) + return workflowID, fmt.Sprintf("schedule-%s", workflowID) +} + +// createSchedule creates a new schedule +func (t *Temporal) CreateSchedule(ctx context.Context, job *models.Job) error { + workflowID, scheduleID := t.WorkflowAndScheduleID(job.ProjectID, job.ID) + cronExpression := utils.ToCron(job.Frequency) + + req := buildExecutionReqForSync(job, workflowID) + + _, err := t.Client.ScheduleClient().Create(ctx, client.ScheduleOptions{ + ID: scheduleID, 
+ Spec: client.ScheduleSpec{ + CronExpressions: []string{cronExpression}, + }, + Action: &client.ScheduleWorkflowAction{ + ID: workflowID, + Workflow: RunSyncWorkflow, + Args: []any{req}, + TaskQueue: t.taskQueue, + }, + Overlap: enums.SCHEDULE_OVERLAP_POLICY_SKIP, + }) + return err +} + +// updateSchedule updates an existing schedule +func (t *Temporal) UpdateSchedule(ctx context.Context, frequency, projectID string, jobID int) error { + cronExpression := utils.ToCron(frequency) + _, scheduleID := t.WorkflowAndScheduleID(projectID, jobID) + + handle := t.Client.ScheduleClient().GetHandle(ctx, scheduleID) + return handle.Update(ctx, client.ScheduleUpdateOptions{ + DoUpdate: func(input client.ScheduleUpdateInput) (*client.ScheduleUpdate, error) { + input.Description.Schedule.Spec = &client.ScheduleSpec{ + CronExpressions: []string{cronExpression}, + } + return &client.ScheduleUpdate{ + Schedule: &input.Description.Schedule, + }, nil + }, + }) +} + +func (t *Temporal) PauseSchedule(ctx context.Context, projectID string, jobID int) error { + _, scheduleID := t.WorkflowAndScheduleID(projectID, jobID) + return t.Client.ScheduleClient().GetHandle(ctx, scheduleID).Pause(ctx, client.SchedulePauseOptions{ + Note: "user paused the schedule", + }) +} + +func (t *Temporal) ResumeSchedule(ctx context.Context, projectID string, jobID int) error { + _, scheduleID := t.WorkflowAndScheduleID(projectID, jobID) + return t.Client.ScheduleClient().GetHandle(ctx, scheduleID).Unpause(ctx, client.ScheduleUnpauseOptions{ + Note: "user resumed the schedule", + }) +} + +func (t *Temporal) DeleteSchedule(ctx context.Context, projectID string, jobID int) error { + _, scheduleID := t.WorkflowAndScheduleID(projectID, jobID) + return t.Client.ScheduleClient().GetHandle(ctx, scheduleID).Delete(ctx) +} + +func (t *Temporal) TriggerSchedule(ctx context.Context, projectID string, jobID int) error { + _, scheduleID := t.WorkflowAndScheduleID(projectID, jobID) + return t.Client.ScheduleClient().GetHandle(ctx, scheduleID).Trigger(ctx, client.ScheduleTriggerOptions{ + Overlap: enums.SCHEDULE_OVERLAP_POLICY_SKIP, + }) +} + +// cancelWorkflow cancels a workflow execution +func (t *Temporal) CancelWorkflow(ctx context.Context, workflowID, runID string) error { + return t.Client.CancelWorkflow(ctx, workflowID, runID) +} + +// ListWorkflow lists workflow executions based on the provided query +func (t *Temporal) ListWorkflow(ctx context.Context, request *workflowservice.ListWorkflowExecutionsRequest) (*workflowservice.ListWorkflowExecutionsResponse, error) { + // Query workflows using the SDK's ListWorkflow method + resp, err := t.Client.ListWorkflow(ctx, request) + if err != nil { + return nil, fmt.Errorf("error listing workflow executions: %s", err) + } + + return resp, nil +} diff --git a/server/internal/services/temporal/execute.go b/server/internal/services/temporal/execute.go new file mode 100644 index 00000000..a8bb4e88 --- /dev/null +++ b/server/internal/services/temporal/execute.go @@ -0,0 +1,210 @@ +package temporal + +import ( + "context" + "fmt" + "time" + + "github.com/beego/beego/v2/server/web" + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/models/dto" + "go.temporal.io/sdk/client" + "golang.org/x/mod/semver" +) + +const ( + RunSyncWorkflow = "RunSyncWorkflow" + ExecuteWorkflow = "ExecuteWorkflow" +) + +type Command string + +type JobConfig struct { + Name string `json:"name"` + Data string `json:"data"` +} + +type ExecutionRequest struct { + Type string 
`json:"type"` + Command Command `json:"command"` + ConnectorType string `json:"connector_type"` + Version string `json:"version"` + Args []string `json:"args"` + Configs []JobConfig `json:"configs"` + WorkflowID string `json:"workflow_id"` + JobID int `json:"job_id"` + Timeout time.Duration `json:"timeout"` + OutputFile string `json:"output_file"` // to get the output file from the workflow +} + +const ( + Discover Command = "discover" + Check Command = "check" + Sync Command = "sync" + Spec Command = "spec" +) + +// DiscoverStreams runs a workflow to discover catalog data +func (t *Temporal) DiscoverStreams(ctx context.Context, sourceType, version, config, streamsConfig, jobName string) (map[string]interface{}, error) { + workflowID := fmt.Sprintf("discover-catalog-%s-%d", sourceType, time.Now().Unix()) + + configs := []JobConfig{ + {Name: "config.json", Data: config}, + {Name: "streams.json", Data: streamsConfig}, + } + + cmdArgs := []string{ + "discover", + "--config", + "/mnt/config/config.json", + } + + if jobName != "" && semver.Compare(version, "v0.2.0") >= 0 { + cmdArgs = append(cmdArgs, "--destination-database-prefix", jobName) + } + + if streamsConfig != "" { + cmdArgs = append(cmdArgs, "--catalog", "/mnt/config/streams.json") + } + + if encryptionKey, _ := web.AppConfig.String(constants.ConfEncryptionKey); encryptionKey != "" { + cmdArgs = append(cmdArgs, "--encryption-key", encryptionKey) + } + + req := &ExecutionRequest{ + Type: "docker", + Command: Discover, + ConnectorType: sourceType, + Version: version, + Args: cmdArgs, + Configs: configs, + WorkflowID: workflowID, + JobID: 0, + Timeout: GetWorkflowTimeout(Discover), + OutputFile: "streams.json", + } + + workflowOptions := client.StartWorkflowOptions{ + ID: workflowID, + TaskQueue: t.taskQueue, + } + + run, err := t.Client.ExecuteWorkflow(ctx, workflowOptions, ExecuteWorkflow, req) + if err != nil { + return nil, fmt.Errorf("failed to execute discover workflow: %s", err) + } + + result, err := ExtractWorkflowResponse(ctx, run) + if err != nil { + return nil, fmt.Errorf("failed to extract workflow response: %v", err) + } + + return result, nil +} + +// FetchSpec runs a workflow to fetch driver specifications +func (t *Temporal) GetDriverSpecs(ctx context.Context, destinationType, sourceType, version string) (dto.SpecOutput, error) { + workflowID := fmt.Sprintf("fetch-spec-%s-%d", sourceType, time.Now().Unix()) + + // spec version >= DefaultSpecVersion is required + if semver.Compare(version, constants.DefaultSpecVersion) < 0 { + version = constants.DefaultSpecVersion + } + + cmdArgs := []string{ + "spec", + } + if destinationType != "" { + cmdArgs = append(cmdArgs, "--destination-type", destinationType) + } + + req := &ExecutionRequest{ + Type: "docker", + Command: Spec, + ConnectorType: sourceType, + Version: version, + Args: cmdArgs, + Configs: nil, + WorkflowID: workflowID, + JobID: 0, + Timeout: GetWorkflowTimeout(Spec), + OutputFile: "", + } + + workflowOptions := client.StartWorkflowOptions{ + ID: workflowID, + TaskQueue: t.taskQueue, + } + + run, err := t.Client.ExecuteWorkflow(ctx, workflowOptions, ExecuteWorkflow, req) + if err != nil { + return dto.SpecOutput{}, fmt.Errorf("failed to execute fetch spec workflow: %s", err) + } + + result, err := ExtractWorkflowResponse(ctx, run) + if err != nil { + return dto.SpecOutput{}, fmt.Errorf("failed to extract workflow response: %v", err) + } + + return dto.SpecOutput{ + Spec: result, + }, nil +} + +// TestConnection runs a workflow to test connection +func (t *Temporal) 
VerifyDriverCredentials(ctx context.Context, workflowID, flag, sourceType, version, config string) (map[string]interface{}, error) { + configs := []JobConfig{ + {Name: "config.json", Data: config}, + } + + cmdArgs := []string{ + "check", + fmt.Sprintf("--%s", flag), + "/mnt/config/config.json", + } + if encryptionKey, _ := web.AppConfig.String(constants.ConfEncryptionKey); encryptionKey != "" { + cmdArgs = append(cmdArgs, "--encryption-key", encryptionKey) + } + + req := &ExecutionRequest{ + Type: "docker", + Command: Check, + ConnectorType: sourceType, + Version: version, + Args: cmdArgs, + Configs: configs, + WorkflowID: workflowID, + Timeout: GetWorkflowTimeout(Check), + } + + workflowOptions := client.StartWorkflowOptions{ + ID: workflowID, + TaskQueue: t.taskQueue, + } + + run, err := t.Client.ExecuteWorkflow(ctx, workflowOptions, ExecuteWorkflow, req) + if err != nil { + return nil, fmt.Errorf("failed to execute test connection workflow: %s", err) + } + + result, err := ExtractWorkflowResponse(ctx, run) + if err != nil { + return nil, fmt.Errorf("failed to extract workflow response: %v", err) + } + + connectionStatus, ok := result["connectionStatus"].(map[string]interface{}) + if !ok || connectionStatus == nil { + return nil, fmt.Errorf("connection status not found") + } + + status, statusOk := connectionStatus["status"].(string) + message, _ := connectionStatus["message"].(string) // message is optional + if !statusOk { + return nil, fmt.Errorf("connection status not found") + } + + return map[string]interface{}{ + "message": message, + "status": status, + }, nil +} diff --git a/server/internal/services/temporal/utils.go b/server/internal/services/temporal/utils.go new file mode 100644 index 00000000..1c234bb4 --- /dev/null +++ b/server/internal/services/temporal/utils.go @@ -0,0 +1,102 @@ +package temporal + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/datazip-inc/olake-ui/server/internal/models" + "go.temporal.io/sdk/client" +) + +// buildExecutionReqForSync builds the ExecutionRequest for a sync job +func buildExecutionReqForSync(job *models.Job, workflowID string) ExecutionRequest { + args := []string{ + "sync", + "--config", "/mnt/config/source.json", + "--destination", "/mnt/config/destination.json", + "--catalog", "/mnt/config/streams.json", + "--state", "/mnt/config/state.json", + } + + return ExecutionRequest{ + Type: "docker", + Command: Sync, + ConnectorType: job.SourceID.Type, + Version: job.SourceID.Version, + Args: args, + WorkflowID: workflowID, + JobID: job.ID, + Timeout: GetWorkflowTimeout(Sync), + OutputFile: "state.json", + } +} + +// extractWorkflowResponse extracts and parses the JSON response from a workflow execution result +func ExtractWorkflowResponse(ctx context.Context, run client.WorkflowRun) (map[string]interface{}, error) { + var result map[string]interface{} + if err := run.Get(ctx, &result); err != nil { + return nil, fmt.Errorf("workflow execution failed: %v", err) + } + + response, ok := result["response"].(string) + if !ok { + return nil, fmt.Errorf("invalid response format from worker") + } + + jsonResponse, err := ExtractJSON(response) + if err != nil { + return nil, err + } + + return jsonResponse, nil +} + +func GetWorkflowTimeout(op Command) time.Duration { + switch op { + case Discover: + return time.Minute * 10 + case Check: + return time.Minute * 10 + case Spec: + return time.Minute * 5 + case Sync: + return time.Hour * 24 * 30 + // check what can the fallback time be + default: + return time.Minute * 
5 + } +} + +// ExtractJSON extracts and returns the last valid JSON block from output +func ExtractJSON(output string) (map[string]interface{}, error) { + outputStr := strings.TrimSpace(output) + if outputStr == "" { + return nil, fmt.Errorf("empty output") + } + + lines := strings.Split(outputStr, "\n") + + // Find the last non-empty line with valid JSON + for i := len(lines) - 1; i >= 0; i-- { + line := strings.TrimSpace(lines[i]) + if line == "" { + continue + } + + start := strings.Index(line, "{") + end := strings.LastIndex(line, "}") + if start != -1 && end != -1 && end > start { + jsonPart := line[start : end+1] + var result map[string]interface{} + if err := json.Unmarshal([]byte(jsonPart), &result); err != nil { + continue // Skip invalid JSON + } + return result, nil + } + } + + return nil, fmt.Errorf("no valid JSON block found in output") +} diff --git a/server/internal/telemetry/job.go b/server/internal/telemetry/job.go deleted file mode 100644 index 16c91886..00000000 --- a/server/internal/telemetry/job.go +++ /dev/null @@ -1,45 +0,0 @@ -package telemetry - -import ( - "context" - "time" - - "github.com/beego/beego/v2/core/logs" - "github.com/datazip/olake-frontend/server/internal/models" -) - -// TrackJobCreation tracks the creation of a new job with relevant properties -func TrackJobCreation(ctx context.Context, job *models.Job) { - go func() { - if instance == nil || job == nil { - return - } - - properties := map[string]interface{}{ - "job_id": job.ID, - "job_name": job.Name, - "project_id": job.ProjectID, - "source_type": job.SourceID.Type, - "source_name": job.SourceID.Name, - "destination_type": job.DestID.DestType, - "destination_name": job.DestID.Name, - "frequency": job.Frequency, - "active": job.Active, - } - - if !job.CreatedAt.IsZero() { - properties["created_at"] = job.CreatedAt.Format(time.RFC3339) - } - - if err := TrackEvent(ctx, EventJobCreated, properties); err != nil { - logs.Debug("Failed to track job creation event: %s", err) - return - } - TrackJobEntity(ctx) - }() -} - -func TrackJobEntity(ctx context.Context) { - TrackSourcesStatus(ctx) - TrackDestinationsStatus(ctx) -} diff --git a/server/internal/temporal/README.md b/server/internal/temporal/README.md deleted file mode 100644 index 5627e66e..00000000 --- a/server/internal/temporal/README.md +++ /dev/null @@ -1,144 +0,0 @@ -# Temporal-based Docker Runner - -This package provides a Temporal-based implementation for running Docker commands. It offers improved reliability, observability, and error handling compared to the direct Docker command execution approach. - -## Features - -- Durable execution with automatic retries -- Detailed workflow history for debugging -- Heartbeats to track long-running operations -- Improved monitoring and visibility -- Better error handling and recovery - -## Prerequisites - -1. Install and run a local Temporal server: - -```bash -# Using docker-compose -docker-compose up -d --build - -# OR using Temporal CLI -temporal server start-dev -``` - -See [Temporal documentation](https://docs.temporal.io/clusters/quick-install) for more installation options. 
- -## Usage - -### Starting a Temporal Worker - -You need to run at least one worker to process workflow and activity tasks: - -```go -package main - -import ( - "log" - "os" - "os/signal" - "syscall" - - "github.com/datazip/olake-server/internal/temporal" -) - -func main() { - // Create and start a worker - worker, err := temporal.NewWorker("") - if err != nil { - log.Fatalf("Failed to create worker: %v", err) - } - - // Start the worker - go func() { - if err := worker.Start(); err != nil { - log.Fatalf("Failed to start worker: %v", err) - } - }() - - // Handle graceful shutdown - signalChan := make(chan os.Signal, 1) - signal.Notify(signalChan, os.Interrupt, syscall.SIGTERM) - <-signalChan - - log.Println("Shutting down worker...") - worker.Stop() -} -``` - -### Using the Temporal-based Docker Runner - -```go -package main - -import ( - "fmt" - "log" - - "github.com/datazip/olake-server/internal/docker" -) - -func main() { - // Create a Temporal-based runner - runner, err := docker.NewTemporalRunner("", "") - if err != nil { - log.Fatalf("Failed to create Temporal runner: %v", err) - } - defer runner.Close() - - // Example: Get catalog from PostgreSQL source - config := `{ - "host": "postgres", - "port": 5432, - "database": "example", - "username": "postgres", - "password": "postgres" - }` - - result, err := runner.GetCatalog("postgres", "latest", config, 1) - if err != nil { - log.Fatalf("Failed to get catalog: %v", err) - } - - fmt.Printf("Catalog result: %+v\n", result) -} -``` - -## Monitoring and Debugging - -You can access the Temporal Web UI to monitor and debug workflow executions: - -- Local development: http://localhost:8233 -- With standard Temporal: http://localhost:8080 - -The Web UI provides: -- Workflow execution history -- Activity details and failures -- Workflow retry information -- Query and signal capabilities - -## Advanced Usage - -### Custom Workflow Configurations - -You can customize workflow options like timeouts, retry policies, and task queues by modifying the Client implementation. - -### Running with a Production Temporal Cluster - -For production, configure your application to connect to your production Temporal cluster: - -```go -// Connect to production Temporal cluster -runner, err := docker.NewTemporalRunner("", "temporal.example.com:7233") -``` - -## Troubleshooting - -1. **Worker Not Processing Tasks:** Ensure the worker is running and registered to the same task queue. -2. **Connection Issues:** Verify Temporal server is running and accessible. -3. **Docker Execution Failures:** Check Docker is installed and available to the worker process. 
- -## Additional Resources - -- [Temporal Documentation](https://docs.temporal.io/) -- [Go SDK Documentation](https://pkg.go.dev/go.temporal.io/sdk) \ No newline at end of file diff --git a/server/internal/temporal/activities.go b/server/internal/temporal/activities.go deleted file mode 100644 index 8a5ac164..00000000 --- a/server/internal/temporal/activities.go +++ /dev/null @@ -1,118 +0,0 @@ -package temporal - -import ( - "context" - "fmt" - "time" - - "github.com/datazip/olake-frontend/server/internal/docker" - "github.com/datazip/olake-frontend/server/internal/models" - "go.temporal.io/sdk/activity" - "go.temporal.io/sdk/temporal" -) - -// DiscoverCatalogActivity runs the discover command to get catalog data -func DiscoverCatalogActivity(ctx context.Context, params *ActivityParams) (map[string]interface{}, error) { - logger := activity.GetLogger(ctx) - logger.Info("Starting sync activity", - "sourceType", params.SourceType, - "workflowID", params.WorkflowID) - - // Create a Docker runner with the default config directory - runner := docker.NewRunner(docker.GetDefaultConfigDir()) - - // Record heartbeat - activity.RecordHeartbeat(ctx, "Running sync command") - - // Execute the sync operation - result, err := runner.GetCatalog( - ctx, - params.SourceType, - params.Version, - params.Config, - params.WorkflowID, - params.StreamsConfig, - params.JobName, - ) - if err != nil { - logger.Error("Sync command failed", "error", err) - return result, fmt.Errorf("sync command failed: %v", err) - } - - return result, nil -} - -// FetchSpecActivity runs the spec command to get connector specifications -func FetchSpecActivity(ctx context.Context, params *ActivityParams) (models.SpecOutput, error) { - runner := docker.NewRunner(docker.GetDefaultConfigDir()) - return runner.FetchSpec(ctx, params.DestinationType, params.SourceType, params.Version, params.WorkflowID) -} - -// TestConnectionActivity runs the check command to test connection -func TestConnectionActivity(ctx context.Context, params *ActivityParams) (map[string]interface{}, error) { - // Create a Docker runner with the default config directory - runner := docker.NewRunner(docker.GetDefaultConfigDir()) - resp, err := runner.TestConnection(ctx, params.Flag, params.SourceType, params.Version, params.Config, params.WorkflowID) - return resp, err -} - -// SyncActivity runs the sync command to transfer data between source and destination -func SyncActivity(ctx context.Context, params *SyncParams) (map[string]interface{}, error) { - logger := activity.GetLogger(ctx) - logger.Info("Starting sync activity", "jobId", params.JobID, "workflowID", params.WorkflowID) - - activity.RecordHeartbeat(ctx, "Running sync command") - - type resErr struct { - res map[string]interface{} - err error - } - done := make(chan resErr, 1) - // excueting sync in a goroutine to prevent blocking and monitoring the sync progress - go func() { - runner := docker.NewRunner(docker.GetDefaultConfigDir()) - res, err := runner.RunSync(ctx, params.JobID, params.WorkflowID) - done <- resErr{res: res, err: err} - }() - - for { - select { - case <-ctx.Done(): - logger.Info("SyncActivity canceled, deferring cleanup to SyncCleanupActivity") - return nil, ctx.Err() - case r := <-done: - if r.err != nil { - // CRITICAL: Check if error is because context was cancelled - if ctx.Err() != nil { - logger.Info("Goroutine failed due to context cancellation", "dockerError", r.err) - return nil, ctx.Err() // Return cancellation error, not docker error - } - - logger.Error("Sync command failed", 
"error", r.err) - return r.res, temporal.NewNonRetryableApplicationError(r.err.Error(), "SyncFailed", r.err) - } - return r.res, nil - default: - activity.RecordHeartbeat(ctx, "sync in progress") - time.Sleep(1 * time.Second) - } - } -} - -// SyncCleanupActivity ensures container is fully stopped and state is persisted to database -func SyncCleanupActivity(ctx context.Context, params *SyncParams) error { - logger := activity.GetLogger(ctx) - logger.Info("Starting cleanup activity", "jobId", params.JobID, "workflowID", params.WorkflowID) - // Stop container gracefully - logger.Info("Stopping container for cleanup %s", params.WorkflowID) - if err := docker.StopContainer(ctx, params.WorkflowID); err != nil { - return temporal.NewNonRetryableApplicationError(err.Error(), "CleanupFailed", err) - } - runner := docker.NewRunner(docker.GetDefaultConfigDir()) - logger.Info("Persisting job state for workflowID %s", params.WorkflowID) - if err := runner.PersistJobStateFromFile(params.JobID, params.WorkflowID); err != nil { - return temporal.NewNonRetryableApplicationError(err.Error(), "CleanupFailed", err) - } - logger.Info("Cleanup completed successfully") - return nil -} diff --git a/server/internal/temporal/client.go b/server/internal/temporal/client.go deleted file mode 100644 index 7458f818..00000000 --- a/server/internal/temporal/client.go +++ /dev/null @@ -1,303 +0,0 @@ -package temporal - -import ( - "context" - "fmt" - "time" - - "github.com/beego/beego/v2/server/web" - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/docker" - "github.com/datazip/olake-frontend/server/internal/models" - "github.com/datazip/olake-frontend/server/utils" - "go.temporal.io/api/enums/v1" - "go.temporal.io/api/workflowservice/v1" - "go.temporal.io/sdk/client" - "golang.org/x/mod/semver" -) - -// TaskQueue is the default task queue for Olake Docker workflows -const ( - DockerTaskQueue = "OLAKE_DOCKER_TASK_QUEUE" - K8sTaskQueue = "OLAKE_K8S_TASK_QUEUE" -) - -var TaskQueue string - -var ( - TemporalAddress string -) - -// SyncAction represents the type of action to perform -type SyncAction string - -const ( - ActionCreate SyncAction = "create" - ActionUpdate SyncAction = "update" - ActionDelete SyncAction = "delete" - ActionTrigger SyncAction = "trigger" - ActionPause SyncAction = "pause" - ActionUnpause SyncAction = "unpause" -) - -func init() { - TemporalAddress = web.AppConfig.DefaultString("TEMPORAL_ADDRESS", "localhost:7233") - - // Choose task queue based on deployment mode - deploymentMode := web.AppConfig.DefaultString("DEPLOYMENT_MODE", "docker") - if deploymentMode == "kubernetes" { - TaskQueue = K8sTaskQueue - } else { - TaskQueue = DockerTaskQueue - } -} - -// Client provides methods to interact with Temporal -type Client struct { - temporalClient client.Client -} - -// NewClient creates a new Temporal client -func NewClient() (*Client, error) { - c, err := client.Dial(client.Options{ - HostPort: TemporalAddress, - }) - if err != nil { - return nil, fmt.Errorf("failed to create Temporal client: %v", err) - } - - return &Client{ - temporalClient: c, - }, nil -} - -// Close closes the Temporal client -func (c *Client) Close() { - if c.temporalClient != nil { - c.temporalClient.Close() - } -} - -// GetCatalog runs a workflow to discover catalog data -func (c *Client) GetCatalog(ctx context.Context, sourceType, version, config, streamsConfig, jobName string) (map[string]interface{}, error) { - params := &ActivityParams{ - SourceType: 
sourceType, - Version: version, - Config: config, - WorkflowID: fmt.Sprintf("discover-catalog-%s-%d", sourceType, time.Now().Unix()), - Command: docker.Discover, - StreamsConfig: streamsConfig, - JobName: jobName, - } - - workflowOptions := client.StartWorkflowOptions{ - ID: params.WorkflowID, - TaskQueue: TaskQueue, - } - - run, err := c.temporalClient.ExecuteWorkflow(ctx, workflowOptions, DiscoverCatalogWorkflow, params) - if err != nil { - return nil, fmt.Errorf("failed to execute discover workflow: %v", err) - } - - var result map[string]interface{} - if err := run.Get(ctx, &result); err != nil { - return nil, fmt.Errorf("workflow execution failed: %v", err) - } - - return result, nil -} - -// FetchSpec runs a workflow to fetch connector specifications -func (c *Client) FetchSpec(ctx context.Context, destinationType, sourceType, version string) (models.SpecOutput, error) { - // spec version >= DefaultSpecVersion is required - if semver.Compare(version, constants.DefaultSpecVersion) < 0 { - version = constants.DefaultSpecVersion - } - - params := &ActivityParams{ - SourceType: sourceType, - Version: version, - WorkflowID: fmt.Sprintf("fetch-spec-%s-%d", sourceType, time.Now().Unix()), - DestinationType: destinationType, - } - - workflowOptions := client.StartWorkflowOptions{ - ID: params.WorkflowID, - TaskQueue: TaskQueue, - } - - run, err := c.temporalClient.ExecuteWorkflow(ctx, workflowOptions, FetchSpecWorkflow, params) - if err != nil { - return models.SpecOutput{}, fmt.Errorf("failed to execute fetch spec workflow: %v", err) - } - - var result models.SpecOutput - if err := run.Get(ctx, &result); err != nil { - return models.SpecOutput{}, fmt.Errorf("workflow execution failed: %v", err) - } - - return result, nil -} - -// TestConnection runs a workflow to test connection -func (c *Client) TestConnection(ctx context.Context, flag, sourceType, version, config string) (map[string]interface{}, error) { - params := &ActivityParams{ - SourceType: sourceType, - Version: version, - Config: config, - WorkflowID: fmt.Sprintf("test-connection-%s-%d", sourceType, time.Now().Unix()), - Command: docker.Check, - Flag: flag, - } - - workflowOptions := client.StartWorkflowOptions{ - ID: params.WorkflowID, - TaskQueue: TaskQueue, - } - - run, err := c.temporalClient.ExecuteWorkflow(ctx, workflowOptions, TestConnectionWorkflow, params) - if err != nil { - return nil, fmt.Errorf("failed to execute test connection workflow: %v", err) - } - - var result map[string]interface{} - if err := run.Get(ctx, &result); err != nil { - return nil, fmt.Errorf("workflow execution failed: %v", err) - } - - return result, nil -} - -// ManageSync handles all sync operations (create, update, delete, trigger) -func (c *Client) ManageSync(ctx context.Context, projectID string, jobID int, frequency string, action SyncAction) (map[string]interface{}, error) { - workflowID := fmt.Sprintf("sync-%s-%d", projectID, jobID) - scheduleID := fmt.Sprintf("schedule-%s", workflowID) - - handle := c.temporalClient.ScheduleClient().GetHandle(ctx, scheduleID) - currentSchedule, err := handle.Describe(ctx) - scheduleExists := err == nil - if action != ActionCreate && !scheduleExists { - return nil, fmt.Errorf("schedule does not exist") - } - switch action { - case ActionCreate: - if frequency == "" { - return nil, fmt.Errorf("frequency is required for creating schedule") - } - if scheduleExists { - return nil, fmt.Errorf("schedule already exists") - } - return c.createSchedule(ctx, handle, scheduleID, workflowID, frequency, jobID) - - case 
ActionUpdate: - if frequency == "" { - return nil, fmt.Errorf("frequency is required for updating schedule") - } - return c.updateSchedule(ctx, handle, currentSchedule, scheduleID, frequency) - - case ActionDelete: - if err := handle.Delete(ctx); err != nil { - return nil, fmt.Errorf("failed to delete schedule: %s", err) - } - return map[string]interface{}{"message": "Schedule deleted successfully"}, nil - - case ActionTrigger: - if err := handle.Trigger(ctx, client.ScheduleTriggerOptions{ - Overlap: enums.SCHEDULE_OVERLAP_POLICY_SKIP, - }); err != nil { - return nil, fmt.Errorf("failed to trigger schedule: %s", err) - } - return map[string]interface{}{"message": "Schedule triggered successfully"}, nil - case ActionPause: - if err := handle.Pause(ctx, client.SchedulePauseOptions{ - Note: "Paused via API", - }); err != nil { - return nil, fmt.Errorf("failed to pause schedule: %s", err) - } - return map[string]interface{}{"message": "Schedule paused successfully"}, nil - - case ActionUnpause: - if err := handle.Unpause(ctx, client.ScheduleUnpauseOptions{ - Note: "Unpaused via API", - }); err != nil { - return nil, fmt.Errorf("failed to unpause schedule: %s", err) - } - return map[string]interface{}{"message": "Schedule unpaused successfully"}, nil - - default: - return nil, fmt.Errorf("unsupported action: %s", action) - } -} - -// createSchedule creates a new schedule -func (c *Client) createSchedule(ctx context.Context, _ client.ScheduleHandle, scheduleID, workflowID, cronSpec string, jobID int) (map[string]interface{}, error) { - cronSpec = utils.ToCron(cronSpec) - _, err := c.temporalClient.ScheduleClient().Create(ctx, client.ScheduleOptions{ - ID: scheduleID, - Spec: client.ScheduleSpec{ - CronExpressions: []string{cronSpec}, - }, - Action: &client.ScheduleWorkflowAction{ - ID: workflowID, - Workflow: RunSyncWorkflow, - Args: []any{jobID}, - TaskQueue: TaskQueue, - }, - Overlap: enums.SCHEDULE_OVERLAP_POLICY_SKIP, - }) - - if err != nil { - return nil, fmt.Errorf("failed to create schedule: %s", err) - } - - return map[string]interface{}{ - "message": "Schedule created successfully", - "cron": cronSpec, - }, nil -} - -// updateSchedule updates an existing schedule -func (c *Client) updateSchedule(ctx context.Context, handle client.ScheduleHandle, currentSchedule *client.ScheduleDescription, _, cronSpec string) (map[string]interface{}, error) { - cronSpec = utils.ToCron(cronSpec) - // Check if update is needed - if len(currentSchedule.Schedule.Spec.CronExpressions) > 0 && - currentSchedule.Schedule.Spec.CronExpressions[0] == cronSpec { - return map[string]interface{}{"message": "Schedule already up to date"}, nil - } - - err := handle.Update(ctx, client.ScheduleUpdateOptions{ - DoUpdate: func(input client.ScheduleUpdateInput) (*client.ScheduleUpdate, error) { - input.Description.Schedule.Spec = &client.ScheduleSpec{ - CronExpressions: []string{cronSpec}, - } - return &client.ScheduleUpdate{ - Schedule: &input.Description.Schedule, - }, nil - }, - }) - - if err != nil { - return nil, fmt.Errorf("failed to update schedule: %s", err) - } - return map[string]interface{}{ - "message": "Schedule updated successfully", - "cron": cronSpec, - }, nil -} - -// cancelWorkflow cancels a workflow execution -func (c *Client) CancelWorkflow(ctx context.Context, workflowID, runID string) error { - return c.temporalClient.CancelWorkflow(ctx, workflowID, runID) -} - -// ListWorkflow lists workflow executions based on the provided query -func (c *Client) ListWorkflow(ctx context.Context, request 
*workflowservice.ListWorkflowExecutionsRequest) (*workflowservice.ListWorkflowExecutionsResponse, error) { - // Query workflows using the SDK's ListWorkflow method - resp, err := c.temporalClient.ListWorkflow(ctx, request) - if err != nil { - return nil, fmt.Errorf("error listing workflow executions: %v", err) - } - - return resp, nil -} diff --git a/server/internal/temporal/types.go b/server/internal/temporal/types.go deleted file mode 100644 index 036c8cec..00000000 --- a/server/internal/temporal/types.go +++ /dev/null @@ -1,41 +0,0 @@ -package temporal - -import "github.com/datazip/olake-frontend/server/internal/docker" - -// DockerCommandParams contains parameters for Docker commands (legacy) -type DockerCommandParams struct { - SourceType string - Version string - Config string - SourceID int - Command string -} - -// ActivityParams contains parameters for Docker command activities -type ActivityParams struct { - DestinationType string - SourceType string - Version string - Config string - SourceID int - Command docker.Command - DestConfig string - DestID int - WorkflowID string - StreamsConfig string - Flag string - JobName string -} - -// SyncParams contains parameters for sync activities -type SyncParams struct { - JobID int - WorkflowID string - JobName string - CreatedBy string - CreatedAt string - SourceType string - SourceName string - DestinationType string - DestinationName string -} diff --git a/server/internal/temporal/worker.go b/server/internal/temporal/worker.go deleted file mode 100644 index 677fe44a..00000000 --- a/server/internal/temporal/worker.go +++ /dev/null @@ -1,56 +0,0 @@ -package temporal - -import ( - "fmt" - - "go.temporal.io/sdk/client" - "go.temporal.io/sdk/worker" -) - -// Worker handles Temporal worker functionality -type Worker struct { - temporalClient client.Client - worker worker.Worker -} - -// NewWorker creates a new Temporal worker -func NewWorker() (*Worker, error) { - c, err := client.Dial(client.Options{ - HostPort: TemporalAddress, - }) - if err != nil { - return nil, fmt.Errorf("failed to create Temporal client: %v", err) - } - - // Create a worker - w := worker.New(c, TaskQueue, worker.Options{}) - - // Register workflows - w.RegisterWorkflow(DiscoverCatalogWorkflow) - w.RegisterWorkflow(TestConnectionWorkflow) - w.RegisterWorkflow(RunSyncWorkflow) - w.RegisterWorkflow(FetchSpecWorkflow) - - // Register activities - w.RegisterActivity(DiscoverCatalogActivity) - w.RegisterActivity(TestConnectionActivity) - w.RegisterActivity(SyncActivity) - w.RegisterActivity(FetchSpecActivity) - w.RegisterActivity(SyncCleanupActivity) - - return &Worker{ - temporalClient: c, - worker: w, - }, nil -} - -// Start starts the worker -func (w *Worker) Start() error { - return w.worker.Start() -} - -// Stop stops the worker -func (w *Worker) Stop() { - w.worker.Stop() - w.temporalClient.Close() -} diff --git a/server/internal/temporal/workflows.go b/server/internal/temporal/workflows.go deleted file mode 100644 index 87cffc27..00000000 --- a/server/internal/temporal/workflows.go +++ /dev/null @@ -1,129 +0,0 @@ -package temporal - -import ( - "context" - "fmt" - "time" - - "github.com/datazip/olake-frontend/server/internal/models" - "github.com/datazip/olake-frontend/server/internal/telemetry" - "go.temporal.io/sdk/temporal" - "go.temporal.io/sdk/workflow" -) - -// Retry policy constants -var ( - // DefaultRetryPolicy is used for standard operations like discovery and testing connections - DefaultRetryPolicy = &temporal.RetryPolicy{ - InitialInterval: time.Second * 
15, - BackoffCoefficient: 2.0, - MaximumInterval: time.Minute * 10, - MaximumAttempts: 1, - } -) - -// DiscoverCatalogWorkflow is a workflow for discovering catalogs -func DiscoverCatalogWorkflow(ctx workflow.Context, params *ActivityParams) (map[string]interface{}, error) { - // Execute the DiscoverCatalogActivity directly - options := workflow.ActivityOptions{ - StartToCloseTimeout: time.Minute * 10, - RetryPolicy: DefaultRetryPolicy, - } - ctx = workflow.WithActivityOptions(ctx, options) - - var result map[string]interface{} - err := workflow.ExecuteActivity(ctx, DiscoverCatalogActivity, params).Get(ctx, &result) - if err != nil { - return nil, err - } - - return result, nil -} - -// FetchSpecWorkflow is a workflow for fetching connector specifications -func FetchSpecWorkflow(ctx workflow.Context, params *ActivityParams) (models.SpecOutput, error) { - // Execute the FetchSpecActivity directly - options := workflow.ActivityOptions{ - StartToCloseTimeout: time.Minute * 5, - HeartbeatTimeout: time.Minute * 1, - RetryPolicy: DefaultRetryPolicy, - } - ctx = workflow.WithActivityOptions(ctx, options) - - var result models.SpecOutput - err := workflow.ExecuteActivity(ctx, FetchSpecActivity, params).Get(ctx, &result) - if err != nil { - return models.SpecOutput{}, err - } - - return result, nil -} - -// TestConnectionWorkflow is a workflow for testing connections -func TestConnectionWorkflow(ctx workflow.Context, params *ActivityParams) (map[string]interface{}, error) { - // Execute the TestConnectionActivity directly - options := workflow.ActivityOptions{ - StartToCloseTimeout: time.Minute * 10, - RetryPolicy: DefaultRetryPolicy, - } - ctx = workflow.WithActivityOptions(ctx, options) - - var result map[string]interface{} - err := workflow.ExecuteActivity(ctx, TestConnectionActivity, params).Get(ctx, &result) - if err != nil { - return nil, err - } - - return result, nil -} - -// RunSyncWorkflow is a workflow for running data synchronization -func RunSyncWorkflow(ctx workflow.Context, jobID int) (result map[string]interface{}, err error) { - logger := workflow.GetLogger(ctx) - options := workflow.ActivityOptions{ - StartToCloseTimeout: time.Hour * 24 * 30, // 30 days - RetryPolicy: &temporal.RetryPolicy{ - InitialInterval: time.Second * 15, - BackoffCoefficient: 2.0, - MaximumInterval: time.Minute * 10, - MaximumAttempts: 0, - }, - WaitForCancellation: true, - HeartbeatTimeout: time.Minute * 1, - } - params := SyncParams{ - JobID: jobID, - WorkflowID: workflow.GetInfo(ctx).WorkflowExecution.ID, - } - - ctx = workflow.WithActivityOptions(ctx, options) - // Defer cleanup for cancellation - defer func() { - logger.Info("executing workflow cleanup...") - newCtx, _ := workflow.NewDisconnectedContext(ctx) - cleanupOptions := workflow.ActivityOptions{ - StartToCloseTimeout: time.Minute * 15, - RetryPolicy: DefaultRetryPolicy, - } - newCtx = workflow.WithActivityOptions(newCtx, cleanupOptions) - perr := workflow.ExecuteActivity(newCtx, SyncCleanupActivity, params).Get(newCtx, nil) - if perr != nil { - perr = fmt.Errorf("cleanup error: %s", perr) - if err != nil { - // preserve original err, just append cleanup info - err = fmt.Errorf("%s; cleanup error: %s", err, perr) - } - } - }() - - err = workflow.ExecuteActivity(ctx, SyncActivity, params).Get(ctx, &result) - if err != nil { - // Track sync failure event - telemetry.TrackSyncFailed(context.Background(), jobID, params.WorkflowID) - return nil, err - } - - // Track sync completion - telemetry.TrackSyncCompleted(context.Background(), jobID, 
params.WorkflowID) - return result, nil -} diff --git a/server/main.go b/server/main.go index 0dabb52b..b5eb5ba2 100644 --- a/server/main.go +++ b/server/main.go @@ -1,49 +1,43 @@ package main import ( - "os" - "github.com/beego/beego/v2/client/orm" - "github.com/beego/beego/v2/core/config" - "github.com/beego/beego/v2/core/logs" "github.com/beego/beego/v2/server/web" - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/database" - "github.com/datazip/olake-frontend/server/internal/logger" - "github.com/datazip/olake-frontend/server/internal/telemetry" - "github.com/datazip/olake-frontend/server/routes" + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/database" + "github.com/datazip-inc/olake-ui/server/internal/handlers" + services "github.com/datazip-inc/olake-ui/server/internal/services/etl" + "github.com/datazip-inc/olake-ui/server/routes" + "github.com/datazip-inc/olake-ui/server/utils/logger" + "github.com/datazip-inc/olake-ui/server/utils/telemetry" ) func main() { - // TODO: check if we have to create a new config file for docker compatibility - if key := os.Getenv(constants.EncryptionKey); key == "" { - logs.Warning("Encryption key is not set. This is not recommended for production environments.") - } - - // start telemetry service - telemetry.InitTelemetry() - - // check constants constants.Init() + logger.Init() + db, err := database.Init() + if err != nil { + logger.Fatalf("Failed to initialize database: %s", err) + return + } - // init logger - logsdir, _ := config.String("logsdir") - logger.InitLogger(logsdir) - - // init database - err := database.Init() + // Initialize unified AppService + appSvc, err := services.InitAppService(db) if err != nil { - logs.Critical("Failed to initialize database: %s", err) + logger.Fatalf("Failed to initialize services: %s", err) return } + logger.Info("Application services initialized successfully") + telemetry.InitTelemetry(db) - // init routers - routes.Init() + routes.Init(handlers.NewHandler(appSvc)) + if key, _ := web.AppConfig.String(constants.ConfEncryptionKey); key == "" { + logger.Warn("Encryption key is not set. 
This is not recommended for production environments.") + } - // setup environment mode if web.BConfig.RunMode == "dev" || web.BConfig.RunMode == "staging" { orm.Debug = true } - web.Run() + // TODO: handle graceful shutdown } diff --git a/server/routes/router.go index f7ea7a72..3dc17344 100644 --- a/server/routes/router.go +++ b/server/routes/router.go @@ -5,7 +5,9 @@ import ( "github.com/beego/beego/v2/server/web" "github.com/beego/beego/v2/server/web/context" - "github.com/datazip/olake-frontend/server/internal/handlers" + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/internal/handlers" + "github.com/datazip-inc/olake-ui/server/internal/handlers/middleware" ) // writeDefaultCorsHeaders sets common CORS headers @@ -31,59 +33,62 @@ func CustomCorsFilter(ctx *context.Context) { } } -func Init() { - if runmode, err := web.AppConfig.String("runmode"); err == nil && runmode == "localdev" { +func Init(h *handlers.Handler) { + if runmode, err := web.AppConfig.String(constants.ConfRunMode); err == nil && runmode == "localdev" { web.InsertFilter("*", web.BeforeRouter, CustomCorsFilter) } else { // Serve static frontend files web.SetStaticPath("", "/opt/frontend/dist") // Vite assets are in /assets // Serve index.html for React frontend - web.Router("/*", &handlers.FrontendHandler{}) // any other frontend route + web.Router("/*", h, "get:ServeFrontend") // any other frontend route } // Apply auth middleware to protected routes - web.InsertFilter("/api/v1/*", web.BeforeRouter, handlers.AuthMiddleware) + web.InsertFilter("/api/v1/*", web.BeforeRouter, middleware.AuthMiddleware) // Auth routes - web.Router("/login", &handlers.AuthHandler{}, "post:Login") - web.Router("/signup", &handlers.AuthHandler{}, "post:Signup") - web.Router("/auth/check", &handlers.AuthHandler{}, "get:CheckAuth") - web.Router("/telemetry-id", &handlers.AuthHandler{}, "get:GetTelemetryID") + web.Router("/login", h, "post:Login") + web.Router("/signup", h, "post:Signup") + web.Router("/auth/check", h, "get:CheckAuth") + web.Router("/telemetry-id", h, "get:GetTelemetryID") // User routes - web.Router("/api/v1/users", &handlers.UserHandler{}, "post:CreateUser") - web.Router("/api/v1/users", &handlers.UserHandler{}, "get:GetAllUsers") - web.Router("/api/v1/users/:id", &handlers.UserHandler{}, "put:UpdateUser") - web.Router("/api/v1/users/:id", &handlers.UserHandler{}, "delete:DeleteUser") + web.Router("/api/v1/users", h, "post:CreateUser") + web.Router("/api/v1/users", h, "get:GetAllUsers") + web.Router("/api/v1/users/:id", h, "put:UpdateUser") + web.Router("/api/v1/users/:id", h, "delete:DeleteUser") // Source routes - web.Router("/api/v1/project/:projectid/sources", &handlers.SourceHandler{}, "get:GetAllSources") - web.Router("/api/v1/project/:projectid/sources", &handlers.SourceHandler{}, "post:CreateSource") - web.Router("/api/v1/project/:projectid/sources/:id", &handlers.SourceHandler{}, "put:UpdateSource") - web.Router("/api/v1/project/:projectid/sources/:id", &handlers.SourceHandler{}, "delete:DeleteSource") - web.Router("/api/v1/project/:projectid/sources/test", &handlers.SourceHandler{}, "post:TestConnection") - web.Router("/api/v1/project/:projectid/sources/streams", &handlers.SourceHandler{}, "post:GetSourceCatalog") - web.Router("/api/v1/project/:projectid/sources/versions", &handlers.SourceHandler{}, "get:GetSourceVersions") - web.Router("/api/v1/project/:projectid/sources/spec", &handlers.SourceHandler{}, "post:GetProjectSourceSpec") +
web.Router("/api/v1/project/:projectid/sources", h, "get:ListSources") + web.Router("/api/v1/project/:projectid/sources", h, "post:CreateSource") + web.Router("/api/v1/project/:projectid/sources/:id", h, "put:UpdateSource") + web.Router("/api/v1/project/:projectid/sources/:id", h, "delete:DeleteSource") + web.Router("/api/v1/project/:projectid/sources/test", h, "post:TestSourceConnection") + web.Router("/api/v1/project/:projectid/sources/streams", h, "post:GetSourceCatalog") + web.Router("/api/v1/project/:projectid/sources/versions", h, "get:GetSourceVersions") + web.Router("/api/v1/project/:projectid/sources/spec", h, "post:GetSourceSpec") // Destination routes - web.Router("/api/v1/project/:projectid/destinations", &handlers.DestHandler{}, "get:GetAllDestinations") - web.Router("/api/v1/project/:projectid/destinations", &handlers.DestHandler{}, "post:CreateDestination") - web.Router("/api/v1/project/:projectid/destinations/:id", &handlers.DestHandler{}, "put:UpdateDestination") - web.Router("/api/v1/project/:projectid/destinations/:id", &handlers.DestHandler{}, "delete:DeleteDestination") - web.Router("/api/v1/project/:projectid/destinations/test", &handlers.DestHandler{}, "post:TestConnection") - web.Router("/api/v1/project/:projectid/destinations/versions", &handlers.DestHandler{}, "get:GetDestinationVersions") - web.Router("/api/v1/project/:projectid/destinations/spec", &handlers.DestHandler{}, "post:GetDestinationSpec") + web.Router("/api/v1/project/:projectid/destinations", h, "get:ListDestinations") + web.Router("/api/v1/project/:projectid/destinations", h, "post:CreateDestination") + web.Router("/api/v1/project/:projectid/destinations/:id", h, "put:UpdateDestination") + web.Router("/api/v1/project/:projectid/destinations/:id", h, "delete:DeleteDestination") + web.Router("/api/v1/project/:projectid/destinations/test", h, "post:TestDestinationConnection") + web.Router("/api/v1/project/:projectid/destinations/versions", h, "get:GetDestinationVersions") + web.Router("/api/v1/project/:projectid/destinations/spec", h, "post:GetDestinationSpec") // Job routes - web.Router("/api/v1/project/:projectid/jobs", &handlers.JobHandler{}, "get:GetAllJobs") - web.Router("/api/v1/project/:projectid/jobs", &handlers.JobHandler{}, "post:CreateJob") - web.Router("/api/v1/project/:projectid/jobs/:id", &handlers.JobHandler{}, "put:UpdateJob") - web.Router("/api/v1/project/:projectid/jobs/:id", &handlers.JobHandler{}, "delete:DeleteJob") - web.Router("/api/v1/project/:projectid/jobs/:id/sync", &handlers.JobHandler{}, "post:SyncJob") - web.Router("/api/v1/project/:projectid/jobs/:id/activate", &handlers.JobHandler{}, "post:ActivateJob") - web.Router("/api/v1/project/:projectid/jobs/:id/tasks", &handlers.JobHandler{}, "get:GetJobTasks") - web.Router("/api/v1/project/:projectid/jobs/:id/cancel", &handlers.JobHandler{}, "get:CancelJobRun") - web.Router("/api/v1/project/:projectid/jobs/:id/tasks/:taskid/logs", &handlers.JobHandler{}, "post:GetTaskLogs") - web.Router("/api/v1/project/:projectid/jobs/check-unique", &handlers.JobHandler{}, "post:CheckUniqueJobName") + web.Router("/api/v1/project/:projectid/jobs", h, "get:ListJobs") + web.Router("/api/v1/project/:projectid/jobs", h, "post:CreateJob") + web.Router("/api/v1/project/:projectid/jobs/:id", h, "put:UpdateJob") + web.Router("/api/v1/project/:projectid/jobs/:id", h, "delete:DeleteJob") + web.Router("/api/v1/project/:projectid/jobs/:id/sync", h, "post:SyncJob") + web.Router("/api/v1/project/:projectid/jobs/:id/activate", h, "post:ActivateJob") + 
web.Router("/api/v1/project/:projectid/jobs/:id/tasks", h, "get:GetJobTasks") + web.Router("/api/v1/project/:projectid/jobs/:id/cancel", h, "get:CancelJobRun") + web.Router("/api/v1/project/:projectid/jobs/:id/tasks/:taskid/logs", h, "post:GetTaskLogs") + web.Router("/api/v1/project/:projectid/jobs/check-unique", h, "post:CheckUniqueJobName") + + // worker callback routes + web.Router("/internal/worker/callback/sync-telemetry", h, "post:UpdateSyncTelemetry") } diff --git a/server/tests/docker-compose.yml b/server/tests/docker-compose.yml new file mode 100644 index 00000000..f07ec699 --- /dev/null +++ b/server/tests/docker-compose.yml @@ -0,0 +1,52 @@ +version: '3.8' + +services: + postgres: + image: postgres:15 + container_name: olake_postgres-test + restart: unless-stopped + environment: + POSTGRES_USER: postgres + POSTGRES_PASSWORD: secret1234 + POSTGRES_DB: postgres + ports: + - "5433:5432" + volumes: + - ./pg_hba.conf:/etc/postgresql/pg_hba.conf + command: + - bash + - -c + - | + # Install wal2json + apt-get update && apt-get install -y postgresql-15-wal2json + + # Start PostgreSQL with basic config in background + docker-entrypoint.sh postgres \ + -c hba_file=/etc/postgresql/pg_hba.conf \ + -c listen_addresses='*' \ + -c wal_level=logical \ + -c max_wal_senders=10 \ + -c max_replication_slots=10 \ + -c shared_preload_libraries=wal2json & + + # Wait for PostgreSQL to be ready + while ! pg_isready -U postgres -h localhost -p 5432; do + sleep 1 + done + + # Create replication slot if it doesn't exist + psql -v ON_ERROR_STOP=1 -U postgres < /dev/null && + apt-get update && + apt-get install -y docker-compose-plugin && + update-ca-certificates + ` + + // Download destination docker-compose + // TODO: Either move destination and source config into the same Docker Compose setup or download both from the same location for consistency. + downloadDestinationComposeCmd = ` + cd /mnt && + curl -fsSL -o docker-compose.destination.yml \ + https://raw.githubusercontent.com/datazip-inc/olake/master/destination/iceberg/local-test/docker-compose.yml + ` + + // Start postgres test infrastructure + startPostgresCmd = ` + cd /mnt/server/tests && + docker compose up -d && + for i in $(seq 1 30); do + if docker exec olake_postgres-test psql -h localhost -U postgres -d postgres -c "SELECT 1" 2>/dev/null; then + echo "PostgreSQL ready." + break + fi + sleep 2 + done && + docker exec olake_postgres-test psql -U postgres -d postgres -c \ + "SELECT slot_name, plugin, slot_type, active FROM pg_replication_slots WHERE slot_name = 'olake_slot';" + ` + + // Start destination services (iceberg stack) + startDestinationCmd = ` + cd /mnt && + docker compose -f docker-compose.destination.yml up -d minio mc postgres spark-iceberg && + sleep 5 && + docker compose -f docker-compose.destination.yml ps + ` + + // Start OLake application + startOLakeCmd = ` + cd /mnt && + mkdir -p /mnt/olake-data && + docker compose up -d && + for i in $(seq 1 60); do + if curl -f http://localhost:8000/health 2>/dev/null || curl -f http://localhost:8000 2>/dev/null; then + echo "OLake UI ready." 
+ break + fi + sleep 2 + done + ` + + // Network setup + networkSetupCmd = ` + docker network create olake-network || true && + docker network connect olake-network olake-ui || true && + docker network connect olake-network postgres || true && + docker network connect olake-network olake_postgres-test || true + ` + + // Install Playwright and dependencies + installPlaywrightCmd = ` + cd /mnt/ui && + pnpm add -D @playwright/test && + pnpm exec playwright install --with-deps chromium + ` + + // Run Playwright tests + runPlaywrightCmd = ` + cd /mnt/ui && + PLAYWRIGHT_TEST_BASE_URL=http://localhost:8000 DEBUG=pw:api npx playwright test tests/flows/job-end-to-end.spec.ts + ` + + icebergDB = "postgres_iceberg_jdbc_job_postgres_public" + icebergCatalog = "olake_iceberg" + currentTestTable = "postgres_test_table_olake" +) + +func DinDTestContainer(t *testing.T) error { + ctx, cancel := context.WithTimeout(context.Background(), 30*time.Minute) + defer cancel() + projectRoot, err := filepath.Abs(filepath.Join("..", "..")) + if err != nil { + return fmt.Errorf("could not determine project root: %w", err) + } + t.Logf("Project root identified at: %s", projectRoot) + + req := testcontainers.ContainerRequest{ + Image: "ubuntu:22.04", + Env: map[string]string{ + "DOCKER_TLS_CERTDIR": "", + "TELEMETRY_DISABLED": "true", + "TESTCONTAINERS_RYUK_DISABLED": "true", + "DEBIAN_FRONTEND": "noninteractive", + }, + HostConfigModifier: func(hc *container.HostConfig) { + hc.Privileged = true + hc.Binds = []string{ + fmt.Sprintf("%s:/mnt:rw", projectRoot), + } + // Tmpfs mounts create temporary in-memory filesystems inside the container. + // These directories behave like RAM disks: they exist only in memory (not on disk) and are automatically cleaned up when the container stops. + // This is useful for high-performance temporary storage.
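+ // Note: the tmpfs sizes below are upper bounds, not pre-allocated memory; space is consumed only as files are written inside the mounts.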
+ // 70GB for docker and 10GB for shared memory space in Linux + hc.Tmpfs = map[string]string{ + "/var/lib/docker": "size=70G", + "/dev/shm": "size=10G", + } + hc.Resources.Memory = 22 * 1024 * 1024 * 1024 // 22GB + hc.ExtraHosts = append(hc.ExtraHosts, "host.docker.internal:host-gateway") + }, + ConfigModifier: func(config *container.Config) { + config.WorkingDir = "/mnt" + }, + ExposedPorts: []string{"8000/tcp", "2375/tcp", "5433/tcp", "15002/tcp"}, + Cmd: []string{ + "/bin/sh", "-c", + `set -e + apt-get update -y && + apt-get install -y --no-install-recommends ca-certificates curl gnupg lsb-release iproute2 procps && + apt-get install -y --no-install-recommends docker.io && + mkdir -p /var/lib/docker /var/run/docker && + exec dockerd --host=tcp://0.0.0.0:2375 --host=unix:///var/run/docker.sock + `, + }, + WaitingFor: wait.ForExec([]string{"docker", "-H", "tcp://127.0.0.1:2375", "info"}).WithStartupTimeout(60 * time.Second).WithPollInterval(1 * time.Second), + } + + ctr, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{ + ContainerRequest: req, + Started: true, + }) + if err != nil { + return fmt.Errorf("failed to start DinD container: %w", err) + } + + // container host + host, err := ctr.Host(ctx) + if err != nil { + return fmt.Errorf("failed to get host: %w", err) + } + + // postgres port + postgresPort, err := ctr.MappedPort(ctx, "5433/tcp") + if err != nil { + return fmt.Errorf("failed to get postgres port: %w", err) + } + + // iceberg-spark port + sparkPort, err := ctr.MappedPort(ctx, "15002/tcp") + if err != nil { + return fmt.Errorf("failed to get spark port: %w", err) + } + + t.Log("Docker daemon is ready") + // Step 1: Install tools + t.Log("Installing required tools...") + if code, out, err := ExecCommand(ctx, ctr, setupToolsCmd); err != nil || code != 0 { + return fmt.Errorf("tools installation failed (%d): %s\n%s", code, err, out) + } + + // Step 2: Download destination docker-compose + t.Log("Downloading destination docker-compose...") + if code, out, err := ExecCommand(ctx, ctr, downloadDestinationComposeCmd); err != nil || code != 0 { + return fmt.Errorf("destination docker-compose download failed (%d): %s\n%s", code, err, out) + } + + // Step 3: Start PostgreSQL test infrastructure + t.Log("Starting PostgreSQL test infrastructure...") + if code, out, err := ExecCommand(ctx, ctr, startPostgresCmd); err != nil || code != 0 { + return fmt.Errorf("postgres startup failed (%d): %s\n%s", code, err, out) + } + + // Step 4: Start destination services (Iceberg stack) + t.Log("Starting destination services...") + if code, out, err := ExecCommand(ctx, ctr, startDestinationCmd); err != nil || code != 0 { + return fmt.Errorf("destination services startup failed (%d): %s\n%s", code, err, out) + } + + // Step 5: Patch docker-compose for local images + t.Log("Patching docker-compose to build local images...") + if err := PatchDockerCompose(ctx, t, ctr); err != nil { + return err + } + + // Step 6: Start OLake application + t.Log("Starting OLake docker-compose services...") + if code, out, err := ExecCommand(ctx, ctr, startOLakeCmd); err != nil || code != 0 { + return fmt.Errorf("OLake startup failed (%d): %s\n%s", code, err, out) + } + + // Step 7: Setup networks + t.Log("Setting up Docker networks...") + if code, out, err := ExecCommand(ctx, ctr, networkSetupCmd); err != nil || code != 0 { + t.Logf("Warning: Network setup failed (%d): %s\n%s", code, err, out) + } + + // Step 8: Query the postgres source + ExecuteQuery(ctx, t, "create", host,
postgresPort.Port()) + ExecuteQuery(ctx, t, "clean", host, postgresPort.Port()) + ExecuteQuery(ctx, t, "add", host, postgresPort.Port()) + + t.Logf("OLake UI is ready and accessible at: http://localhost:8000") + + // Step 9: Install Playwright + t.Log("Installing Playwright and dependencies...") + if code, out, err := ExecCommand(ctx, ctr, installPlaywrightCmd); err != nil || code != 0 { + return fmt.Errorf("playwright installation failed (%d): %s\n%s", code, err, out) + } + + // Step 10: Run Playwright tests + t.Log("Executing Playwright tests...") + if code, out, err := ExecCommandWithStreaming(ctx, t, ctr, runPlaywrightCmd); err != nil || code != 0 { + return fmt.Errorf("playwright tests failed (%d): %s\n%s", code, err, out) + } + t.Log("Playwright tests passed successfully.") + + // wait before verifying iceberg data + t.Log("Waiting for 20 seconds before verifying iceberg data...") + time.Sleep(20 * time.Second) + + // Step 11: Verify in iceberg + t.Logf("Starting Iceberg data verification...") + VerifyIcebergTest(ctx, t, ctr, host, sparkPort.Port()) + return nil +} + +// ExecCommandWithStreaming executes a command and streams output in real-time +func ExecCommandWithStreaming(ctx context.Context, t *testing.T, ctr testcontainers.Container, cmd string) (int, string, error) { + exitCode, reader, err := ctr.Exec(ctx, []string{"sh", "-c", cmd}) + if err != nil { + return -1, "", err + } + + var output strings.Builder + scanner := bufio.NewScanner(reader) + for scanner.Scan() { + line := scanner.Text() + t.Log(line) + output.WriteString(line + "\n") + } + + if err := scanner.Err(); err != nil { + return exitCode, output.String(), err + } + + return exitCode, output.String(), nil +} + +// PatchDockerCompose updates olake-ui to build from local code +// TODO: Remove patch command and find alternative to use local code +func PatchDockerCompose(ctx context.Context, t *testing.T, ctr testcontainers.Container) error { + patchCmd := ` + set -e + tmpfile=$(mktemp) + awk ' + BEGIN{svc="";} + /^ olake-ui:/{svc="olake-ui"; print; next} + /^ temporal-worker:/{svc="temporal-worker"; print; next} + /^ [A-Za-z0-9_-]+:/{ if (svc!="") svc=""; print; next} + { + if (svc=="olake-ui" && $0 ~ /^ image:/) { + print " build:"; + print " context: ."; + print " dockerfile: Dockerfile"; + next + } + print + } + ' /mnt/docker-compose.yml > "$tmpfile" && mv "$tmpfile" /mnt/docker-compose.yml +` + + code, out, err := ExecCommand(ctx, ctr, patchCmd) + if err != nil || code != 0 { + t.Logf("docker-compose patch output: %s", string(out)) + return fmt.Errorf("failed to patch docker-compose.yml (%d): %s\n%s", code, err, out) + } + t.Log("docker-compose.yml patched to build local images") + t.Logf("Patched docker-compose.yml:\n%s", string(out)) + + return nil +} + +func VerifyIcebergTest(ctx context.Context, t *testing.T, ctr testcontainers.Container, host, port string) { + sparkConnectAddress := fmt.Sprintf("sc://%s:%s", host, port) + spark, err := sql.NewSessionBuilder().Remote(sparkConnectAddress).Build(ctx) + require.NoError(t, err, "Failed to connect to Spark Connect server") + defer func() { + if stopErr := spark.Stop(); stopErr != nil { + t.Errorf("Failed to stop Spark session: %v", stopErr) + } + if ctr != nil { + t.Log("Running cleanup...") + // Stop docker-compose services + _, _, _ = ExecCommand(ctx, ctr, "cd /mnt && docker-compose down -v --remove-orphans") + // Terminate the DinD container + if err := ctr.Terminate(ctx); err != nil { + t.Logf("Warning: failed to terminate container: %v", err) + } + 
t.Log("Cleanup complete") + } + }() + countQuery := fmt.Sprintf( + "SELECT COUNT(DISTINCT _olake_id) as unique_count FROM %s.%s.%s", + icebergCatalog, icebergDB, currentTestTable, + ) + t.Logf("Executing query: %s", countQuery) + + countQueryDf, err := spark.Sql(ctx, countQuery) + require.NoError(t, err, "Failed to execute query on the table") + + rows, err := countQueryDf.Collect(ctx) + require.NoError(t, err, "Failed to collect data rows from Iceberg") + require.NotEmpty(t, rows, "No rows returned for _op_type = 'r'") + + // check count and verify + countValue := rows[0].Value("unique_count").(int64) + require.Equal(t, int64(5), countValue, "Expected count to be 5") + t.Logf("✅ Test passed: count value %v matches expected value 5", countValue) +} + +func ExecuteQuery(ctx context.Context, t *testing.T, operation, host, port string) { + t.Helper() + connStr := fmt.Sprintf("postgres://postgres@%s:%s/postgres?sslmode=disable", host, port) + db, ok := sqlx.ConnectContext(ctx, "postgres", connStr) + require.NoError(t, ok, "failed to connect to postgres") + defer func() { + require.NoError(t, db.Close(), "failed to close postgres connection") + }() + + // integration test uses only one stream for testing + integrationTestTable := currentTestTable + var query string + + switch operation { + case "create": + query = fmt.Sprintf(` + CREATE TABLE IF NOT EXISTS %s ( + col_bigint BIGINT, + col_bigserial BIGSERIAL PRIMARY KEY, + col_bool BOOLEAN, + col_char CHAR(1), + col_character CHAR(10), + col_character_varying VARCHAR(50), + col_date DATE, + col_decimal NUMERIC, + col_double_precision DOUBLE PRECISION, + col_float4 REAL, + col_int INT, + col_int2 SMALLINT, + col_integer INTEGER, + col_interval INTERVAL, + col_json JSON, + col_jsonb JSONB, + col_name NAME, + col_numeric NUMERIC, + col_real REAL, + col_text TEXT, + col_timestamp TIMESTAMP, + col_timestamptz TIMESTAMPTZ, + col_uuid UUID, + col_varbit VARBIT(20), + col_xml XML, + CONSTRAINT unique_custom_key UNIQUE (col_bigserial) + )`, integrationTestTable) + + case "drop": + query = fmt.Sprintf("DROP TABLE IF EXISTS %s", integrationTestTable) + + case "clean": + query = fmt.Sprintf("TRUNCATE TABLE %s", integrationTestTable) + + case "add": + insertTestData(ctx, t, db, integrationTestTable) + return // Early return since we handle all inserts in the helper function + + case "insert": + query = fmt.Sprintf(` + INSERT INTO %s ( + col_bigint, col_bool, col_char, col_character, + col_character_varying, col_date, col_decimal, + col_double_precision, col_float4, col_int, col_int2, + col_integer, col_interval, col_json, col_jsonb, + col_name, col_numeric, col_real, col_text, + col_timestamp, col_timestamptz, col_uuid, col_varbit, col_xml + ) VALUES ( + 123456789012345, TRUE, 'c', 'char_val', + 'varchar_val', '2023-01-01', 123.45, + 123.456789, 123.45, 123, 123, 12345, + '1 hour', '{"key": "value"}', '{"key": "value"}', + 'test_name', 123.45, 123.45, 'sample text', + '2023-01-01 12:00:00', '2023-01-01 12:00:00+00', + '123e4567-e89b-12d3-a456-426614174000', B'101010', + 'value' + )`, integrationTestTable) + + case "update": + query = fmt.Sprintf(` + UPDATE %s SET + col_bigint = 123456789012340, + col_bool = FALSE, + col_char = 'd', + col_character = 'updated__', + col_character_varying = 'updated val', + col_date = '2024-07-01', + col_decimal = 543.21, + col_double_precision = 987.654321, + col_float4 = 543.21, + col_int = 321, + col_int2 = 321, + col_integer = 54321, + col_interval = '2 hours', + col_json = '{"new": "json"}', + col_jsonb = '{"new": 
"jsonb"}', + col_name = 'updated_name', + col_numeric = 321.00, + col_real = 321.00, + col_text = 'updated text', + col_timestamp = '2024-07-01 15:30:00', + col_timestamptz = '2024-07-01 15:30:00+00', + col_uuid = '00000000-0000-0000-0000-000000000000', + col_varbit = B'111000', + col_xml = 'value' + WHERE col_bigserial = 1`, integrationTestTable) + + case "delete": + query = fmt.Sprintf("DELETE FROM %s WHERE col_bigserial = 1", integrationTestTable) + + default: + t.Fatalf("Unsupported operation: %s", operation) + } + _, err := db.ExecContext(ctx, query) + require.NoError(t, err, "Failed to execute %s operation", operation) +} + +// insertTestData inserts test data into the specified table +func insertTestData(ctx context.Context, t *testing.T, db *sqlx.DB, tableName string) { + t.Helper() + + for i := 1; i <= 5; i++ { + query := fmt.Sprintf(` + INSERT INTO %s ( + col_bigint, col_bigserial, col_bool, col_char, col_character, + col_character_varying, col_date, col_decimal, + col_double_precision, col_float4, col_int, col_int2, col_integer, + col_interval, col_json, col_jsonb, col_name, col_numeric, + col_real, col_text, col_timestamp, col_timestamptz, + col_uuid, col_varbit, col_xml + ) VALUES ( + 123456789012345, DEFAULT, TRUE, 'c', 'char_val', + 'varchar_val', '2023-01-01', 123.45, + 123.456789, 123.45, 123, 123, 12345, '1 hour', '{"key": "value"}', + '{"key": "value"}', 'test_name', 123.45, 123.45, + 'sample text', '2023-01-01 12:00:00', + '2023-01-01 12:00:00+00', + '123e4567-e89b-12d3-a456-426614174000', B'101010', + 'value' + )`, tableName) + + _, err := db.ExecContext(ctx, query) + require.NoError(t, err, "Failed to insert test data") + } +} + +// Helper function to execute container commands +func ExecCommand( + ctx context.Context, + c testcontainers.Container, + cmd string, +) (int, []byte, error) { + code, reader, err := c.Exec(ctx, []string{"/bin/sh", "-c", cmd}) + if err != nil { + return code, nil, err + } + output, _ := io.ReadAll(reader) + return code, output, nil +} diff --git a/server/tests/verify_test.go b/server/tests/verify_test.go new file mode 100644 index 00000000..49a31912 --- /dev/null +++ b/server/tests/verify_test.go @@ -0,0 +1,16 @@ +package tests + +import ( + "os" + "testing" + + _ "github.com/lib/pq" +) + +func TestDinDIntegration(t *testing.T) { + os.Setenv("TESTCONTAINERS_RYUK_DISABLED", "true") + err := DinDTestContainer(t) + if err != nil { + t.Errorf("Error in Docker in Docker container start up: %s", err) + } +} diff --git a/server/utils/docker_utils.go b/server/utils/docker_utils.go index 8aa786df..ec733182 100644 --- a/server/utils/docker_utils.go +++ b/server/utils/docker_utils.go @@ -15,8 +15,9 @@ import ( "github.com/aws/aws-sdk-go-v2/aws" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/ecr" - "github.com/beego/beego/v2/core/logs" "github.com/beego/beego/v2/server/web" + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/utils/logger" "golang.org/x/mod/semver" ) @@ -67,9 +68,9 @@ func GetWorkerEnvVars() map[string]string { // GetDriverImageTags returns image tags from ECR or Docker Hub with fallback to cached images func GetDriverImageTags(ctx context.Context, imageName string, cachedTags bool) ([]string, string, error) { // TODO: make constants file and validate all env vars in start of server - repositoryBase, err := web.AppConfig.String("CONTAINER_REGISTRY_BASE") + repositoryBase, err := web.AppConfig.String(constants.ConfContainerRegistryBase) if err != nil { - 
return nil, "", fmt.Errorf("failed to get CONTAINER_REGISTRY_BASE: %v", err) + return nil, "", fmt.Errorf("failed to get CONTAINER_REGISTRY_BASE: %s", err) } var tags []string images := []string{imageName} @@ -87,7 +88,7 @@ func GetDriverImageTags(ctx context.Context, imageName string, cachedTags bool) // Fallback to cached if online fetch fails or explicitly requested if err != nil && cachedTags { - logs.Warn("failed to fetch image tags online for %s: %s, falling back to cached tags", imageName, err) + logger.Warn("failed to fetch image tags online for %s: %s, falling back to cached tags", imageName, err) tags, err = fetchCachedImageTags(ctx, imageName, repositoryBase) if err != nil { return nil, "", fmt.Errorf("failed to fetch cached image tags for %s: %s", imageName, err) diff --git a/server/utils/encryption.go b/server/utils/encryption.go index aa2aa3f0..6b6c6edb 100644 --- a/server/utils/encryption.go +++ b/server/utils/encryption.go @@ -11,12 +11,12 @@ import ( "errors" "fmt" "io" - "os" "strings" "github.com/aws/aws-sdk-go-v2/config" "github.com/aws/aws-sdk-go-v2/service/kms" - "github.com/datazip/olake-frontend/server/internal/constants" + "github.com/beego/beego/v2/server/web" + "github.com/datazip-inc/olake-ui/server/internal/constants" ) // utility provides encryption and decryption functionality using either AWS KMS or local AES-256-GCM. @@ -31,7 +31,7 @@ import ( func getSecretKey() ([]byte, *kms.Client, error) { // TODO: can we move this to constants and set key and kms client // TODO: use viper package to read environment variables - envKey := os.Getenv(constants.EncryptionKey) + envKey, _ := web.AppConfig.String(constants.ConfEncryptionKey) if strings.TrimSpace(envKey) == "" { return []byte{}, nil, nil // Encryption is disabled } @@ -105,12 +105,12 @@ func Decrypt(encryptedText string) (string, error) { var config string err = json.Unmarshal([]byte(encryptedText), &config) if err != nil { - return "", fmt.Errorf("failed to unmarshal JSON string: %v", err) + return "", fmt.Errorf("failed to unmarshal JSON string: %s", err) } encryptedData, err := base64.StdEncoding.DecodeString(config) if err != nil { - return "", fmt.Errorf("failed to decode base64 data: %v", err) + return "", fmt.Errorf("failed to decode base64 data: %s", err) } // Use KMS if client is provided diff --git a/server/utils/logger/logger.go b/server/utils/logger/logger.go new file mode 100644 index 00000000..1d99f74a --- /dev/null +++ b/server/utils/logger/logger.go @@ -0,0 +1,102 @@ +package logger + +import ( + "io" + "os" + "strings" + "time" + + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/rs/zerolog" + "github.com/spf13/viper" +) + +var logger zerolog.Logger + +func Init() { + format := viper.GetString(constants.EnvLogFormat) + level := viper.GetString(constants.EnvLogLevel) + + zerolog.TimestampFunc = func() time.Time { return time.Now().UTC() } + + var writer io.Writer + switch strings.ToLower(format) { + case "console": + // Use ConsoleWriter with built-in colors and formatting + writer = zerolog.ConsoleWriter{ + Out: os.Stdout, + TimeFormat: time.RFC3339, + } + default: + // Default to JSON for production safety + writer = os.Stdout + } + + logger = zerolog.New(writer).With().Timestamp().Logger() + zerolog.SetGlobalLevel(parseLogLevel(level)) +} + +// parseLogLevel converts a string level to a zerolog.Level +func parseLogLevel(levelStr string) zerolog.Level { + switch strings.ToLower(levelStr) { + case "debug": + return zerolog.DebugLevel + case "info": + return 
zerolog.InfoLevel + case "warn": + return zerolog.WarnLevel + case "error": + return zerolog.ErrorLevel + case "fatal": + return zerolog.FatalLevel + default: + return zerolog.InfoLevel // Default to info level + } +} + +// Info writes record with log level INFO +func Info(v ...interface{}) { + if len(v) == 1 { + logger.Info().Interface("message", v[0]).Send() + } else { + logger.Info().Msgf("%s", v...) + } +} + +func Infof(format string, v ...interface{}) { + logger.Info().Msgf(format, v...) +} + +func Debug(v ...interface{}) { + logger.Debug().Msgf("%s", v...) +} + +func Debugf(format string, v ...interface{}) { + logger.Debug().Msgf(format, v...) +} + +func Error(v ...interface{}) { + logger.Error().Msgf("%s", v...) +} + +func Errorf(format string, v ...interface{}) { + logger.Error().Msgf(format, v...) +} + +func Warn(v ...interface{}) { + logger.Warn().Msgf("%s", v...) +} + +func Warnf(format string, v ...interface{}) { + logger.Warn().Msgf(format, v...) +} + +func Fatal(v ...interface{}) { + logger.Fatal().Msgf("%s", v...) + os.Exit(1) +} + +func Fatalf(format string, v ...interface{}) { + logger.Fatal().Msgf(format, v...) + os.Exit(1) +} diff --git a/server/internal/telemetry/auth.go b/server/utils/telemetry/auth.go similarity index 71% rename from server/internal/telemetry/auth.go rename to server/utils/telemetry/auth.go index 49608f3c..a8e36192 100644 --- a/server/internal/telemetry/auth.go +++ b/server/utils/telemetry/auth.go @@ -3,8 +3,8 @@ package telemetry import ( "context" - "github.com/beego/beego/v2/core/logs" - "github.com/datazip/olake-frontend/server/internal/models" + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/utils/logger" ) // TrackUserLogin tracks when a user logs in to olake-ui @@ -22,7 +22,7 @@ func TrackUserLogin(ctx context.Context, user *models.User) { err := TrackEvent(ctx, EventUserLogin, properties) if err != nil { - logs.Debug("Failed to track user login event: %s", err) + logger.Debug("Failed to track user login event: %s", err) } }() } diff --git a/server/internal/telemetry/constants.go b/server/utils/telemetry/constants.go similarity index 100% rename from server/internal/telemetry/constants.go rename to server/utils/telemetry/constants.go diff --git a/server/internal/telemetry/destination.go b/server/utils/telemetry/destination.go similarity index 73% rename from server/internal/telemetry/destination.go rename to server/utils/telemetry/destination.go index b92fa793..34db60ec 100644 --- a/server/internal/telemetry/destination.go +++ b/server/utils/telemetry/destination.go @@ -5,9 +5,8 @@ import ( "encoding/json" "time" - "github.com/beego/beego/v2/core/logs" - "github.com/datazip/olake-frontend/server/internal/database" - "github.com/datazip/olake-frontend/server/internal/models" + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/utils/logger" ) // TrackDestinationCreation tracks the creation of a new destination with relevant properties @@ -27,7 +26,7 @@ func TrackDestinationCreation(ctx context.Context, dest *models.Destination) { var configMap map[string]interface{} // parse config to get catalog_type if err := json.Unmarshal([]byte(dest.Config), &configMap); err != nil { - logs.Debug("Failed to unmarshal config: %s", err) + logger.Debug("Failed to unmarshal config: %s", err) return } @@ -42,7 +41,7 @@ func TrackDestinationCreation(ctx context.Context, dest *models.Destination) { } if err := TrackEvent(ctx, EventDestinationCreated, properties); 
err != nil { - logs.Debug("Failed to track destination creation event: %s", err) + logger.Debug("Failed to track destination creation event: %s", err) return } @@ -58,13 +57,9 @@ func TrackDestinationsStatus(ctx context.Context) { return } - // TODO: remove creation of orm from here - destORM := database.NewDestinationORM() - jobORM := database.NewJobORM() - - destinations, err := destORM.GetAll() + destinations, err := instance.db.ListDestinations() if err != nil { - logs.Debug("Failed to get all destinations: %s", err) + logger.Debug("Failed to get all destinations: %s", err) return } @@ -72,9 +67,9 @@ func TrackDestinationsStatus(ctx context.Context) { for _, dest := range destinations { // TODO: remove db calls loop - jobs, err := jobORM.GetByDestinationID(dest.ID) + jobs, err := instance.db.GetJobsByDestinationID([]int{dest.ID}) if err != nil { - logs.Debug("Failed to get jobs for destination %d: %s", dest.ID, err) + logger.Debug("Failed to get jobs for destination %d: %s", dest.ID, err) break } if len(jobs) > 0 { @@ -90,7 +85,7 @@ func TrackDestinationsStatus(ctx context.Context) { } if err := TrackEvent(ctx, EventDestinationsUpdated, props); err != nil { - logs.Debug("failed to track destination status event: %s", err) + logger.Debug("failed to track destination status event: %s", err) } }() } diff --git a/server/utils/telemetry/job.go b/server/utils/telemetry/job.go new file mode 100644 index 00000000..a8ff9b6e --- /dev/null +++ b/server/utils/telemetry/job.go @@ -0,0 +1,53 @@ +package telemetry + +import ( + "context" + "time" + + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/utils/logger" +) + +// TrackJobCreation tracks the creation of a new job with relevant properties +func TrackJobCreation(ctx context.Context, job *models.Job) { + go func() { + if instance == nil || job == nil { + return + } + + properties := map[string]interface{}{ + "job_id": job.ID, + "job_name": job.Name, + "project_id": job.ProjectID, + "frequency": job.Frequency, + "active": job.Active, + } + + // Safely add source properties + if job.SourceID != nil { + properties["source_type"] = job.SourceID.Type + properties["source_name"] = job.SourceID.Name + } + + // Safely add destination properties + if job.DestID != nil { + properties["destination_type"] = job.DestID.DestType + properties["destination_name"] = job.DestID.Name + } + + if !job.CreatedAt.IsZero() { + properties["created_at"] = job.CreatedAt.Format(time.RFC3339) + } + + if err := TrackEvent(ctx, EventJobCreated, properties); err != nil { + logger.Debug("Failed to track job creation event: %s", err) + return + } + TrackJobEntity(ctx) + }() +} + +func TrackJobEntity(ctx context.Context) { + TrackSourcesStatus(ctx) + TrackDestinationsStatus(ctx) +} diff --git a/server/internal/telemetry/source.go b/server/utils/telemetry/source.go similarity index 69% rename from server/internal/telemetry/source.go rename to server/utils/telemetry/source.go index 3e47e769..49bc3601 100644 --- a/server/internal/telemetry/source.go +++ b/server/utils/telemetry/source.go @@ -4,9 +4,8 @@ import ( "context" "time" - "github.com/beego/beego/v2/core/logs" - "github.com/datazip/olake-frontend/server/internal/database" - "github.com/datazip/olake-frontend/server/internal/models" + "github.com/datazip-inc/olake-ui/server/internal/models" + "github.com/datazip-inc/olake-ui/server/utils/logger" ) // TrackSourceCreation tracks the creation of a new source with relevant properties @@ -28,7 +27,7 @@ func TrackSourceCreation(ctx 
context.Context, source *models.Source) { } if err := TrackEvent(ctx, EventSourceCreated, properties); err != nil { - logs.Debug("Failed to track source creation event: %s", err) + logger.Debug("Failed to track source creation event: %s", err) return } // Track sources status after creation @@ -43,21 +42,18 @@ func TrackSourcesStatus(ctx context.Context) { return } - sourceORM := database.NewSourceORM() - jobORM := database.NewJobORM() - - sources, err := sourceORM.GetAll() + sources, err := instance.db.ListSources() if err != nil { - logs.Debug("failed to get all sources in track source status: %s", err) + logger.Debug("failed to get all sources in track source status: %s", err) return } activeSources := 0 for _, source := range sources { // TODO: remove orm calls from loop - jobs, err := jobORM.GetBySourceID(source.ID) + jobs, err := instance.db.GetJobsBySourceID([]int{source.ID}) if err != nil { - logs.Debug("failed to get all jobs for source[%d] in track source status: %s", source.ID, err) + logger.Debug("failed to get all jobs for source[%d] in track source status: %s", source.ID, err) break } if len(jobs) > 0 { @@ -73,7 +69,7 @@ func TrackSourcesStatus(ctx context.Context) { } if err := TrackEvent(ctx, EventSourcesUpdated, props); err != nil { - logs.Debug("failed to track source status event: %s", err) + logger.Debug("failed to track source status event: %s", err) } }() } diff --git a/server/internal/telemetry/sync.go b/server/utils/telemetry/sync.go similarity index 85% rename from server/internal/telemetry/sync.go rename to server/utils/telemetry/sync.go index c069a8b4..9f03e369 100644 --- a/server/internal/telemetry/sync.go +++ b/server/utils/telemetry/sync.go @@ -9,9 +9,8 @@ import ( "path/filepath" "time" - "github.com/beego/beego/v2/core/logs" - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/database" + "github.com/datazip-inc/olake-ui/server/internal/constants" + "github.com/datazip-inc/olake-ui/server/utils/logger" ) type jobDetails struct { @@ -25,8 +24,7 @@ type jobDetails struct { } func getJobDetails(jobID int) (*jobDetails, error) { - jobORM := database.NewJobORM() - job, err := jobORM.GetByID(jobID, false) + job, err := instance.db.GetJobByID(jobID, false) if err != nil || job == nil { if job == nil { return nil, fmt.Errorf("job not found") @@ -40,7 +38,7 @@ func getJobDetails(jobID int) (*jobDetails, error) { } if job.CreatedBy != nil { - if user, err := database.NewUserORM().GetByID(job.CreatedBy.ID); err == nil { + if user, err := instance.db.GetUserByID(job.CreatedBy.ID); err == nil { details.CreatedBy = user.Username } } @@ -177,33 +175,33 @@ func TrackSyncStart(ctx context.Context, jobID int, workflowID string) { err := trackSyncEvent(ctx, jobID, workflowID, EventSyncStarted) if err != nil { - logs.Debug("failed to track sync start event: %s", err) + logger.Debug("failed to track sync start event: %s", err) } }() } -func TrackSyncFailed(ctx context.Context, jobID int, workflowID string) { +func TrackSyncFailed(jobID int, workflowID string) { go func() { if instance == nil { return } - err := trackSyncEvent(ctx, jobID, workflowID, EventSyncFailed) + err := trackSyncEvent(context.Background(), jobID, workflowID, EventSyncFailed) if err != nil { - logs.Debug("failed to track sync failed event: %s", err) + logger.Debug("failed to track sync failed event: %s", err) } }() } -func TrackSyncCompleted(ctx context.Context, jobID int, workflowID string) { +func TrackSyncCompleted(jobID int, workflowID 
string) { go func() { if instance == nil { return } - err := trackSyncEvent(ctx, jobID, workflowID, EventSyncCompleted) + err := trackSyncEvent(context.Background(), jobID, workflowID, EventSyncCompleted) if err != nil { - logs.Debug("failed to track sync completed event: %s", err) + logger.Debug("failed to track sync completed event: %s", err) } }() } diff --git a/server/internal/telemetry/telemetry.go b/server/utils/telemetry/telemetry.go similarity index 86% rename from server/internal/telemetry/telemetry.go rename to server/utils/telemetry/telemetry.go index 9f8d9f86..efda8288 100644 --- a/server/internal/telemetry/telemetry.go +++ b/server/utils/telemetry/telemetry.go @@ -14,6 +14,10 @@ import ( "strconv" "strings" "time" + + "github.com/datazip-inc/olake-ui/server/internal/database" + "github.com/datazip-inc/olake-ui/server/utils/logger" + "github.com/spf13/viper" ) var instance *Telemetry @@ -38,9 +42,10 @@ type Telemetry struct { locationInfo *LocationInfo TempUserID string username string + db *database.Database } -func InitTelemetry() { +func InitTelemetry(db *database.Database) { go func() { if disabled, _ := strconv.ParseBool(os.Getenv("TELEMETRY_DISABLED")); disabled { return @@ -70,17 +75,20 @@ func InitTelemetry() { return string(idBytes) }() + logger.Infof("telemetry initialized with user ID: %s, and App version: %s", tempUserID, viper.GetString("BUILD")) + instance = &Telemetry{ httpClient: &http.Client{Timeout: TelemetryConfigTimeout}, platform: PlatformInfo{ OS: runtime.GOOS, Arch: runtime.GOARCH, - OlakeVersion: OlakeVersion, + OlakeVersion: viper.GetString("BUILD"), DeviceCPU: fmt.Sprintf("%d cores", runtime.NumCPU()), }, ipAddress: ip, TempUserID: tempUserID, locationInfo: getLocationFromIP(ip), + db: db, } }() } @@ -110,7 +118,7 @@ func getLocationFromIP(ip string) *LocationInfo { if ip == IPNotFound || ip == "" { return locationInfo } - + // TODO: remove context.Background() from everywhere; create a context in main.go ctx, cancel := context.WithTimeout(context.Background(), TelemetryConfigTimeout) defer cancel() @@ -144,7 +152,7 @@ func getLocationFromIP(ip string) *LocationInfo { } // TrackEvent sends a custom event to Segment -func TrackEvent(ctx context.Context, eventName string, properties map[string]interface{}) error { +func TrackEvent(_ context.Context, eventName string, properties map[string]interface{}) error { if instance.httpClient == nil { return fmt.Errorf("telemetry client is nil") } @@ -180,8 +188,7 @@ func TrackEvent(ctx context.Context, eventName string, properties map[string]int if err != nil { return err } - - req, err := http.NewRequestWithContext(ctx, "POST", ProxyTrackURL, strings.NewReader(string(propsBody))) + req, err := http.NewRequestWithContext(context.Background(), "POST", ProxyTrackURL, strings.NewReader(string(propsBody))) if err != nil { return err } diff --git a/server/utils/utils.go b/server/utils/utils.go index eaf74f4d..5cf66610 100644 --- a/server/utils/utils.go +++ b/server/utils/utils.go @@ -12,13 +12,11 @@ import ( "strings" "time" - "github.com/beego/beego/v2/core/logs" "github.com/beego/beego/v2/server/web" "github.com/oklog/ulid" - "github.com/robfig/cron" - "github.com/datazip/olake-frontend/server/internal/constants" - "github.com/datazip/olake-frontend/server/internal/models" + "github.com/datazip-inc/olake-ui/server/internal/models/dto" + "github.com/datazip-inc/olake-ui/server/utils/logger" ) func ToMapOfInterface(structure any) map[string]interface{} { @@ -36,7 +34,7 @@ func ToMapOfInterface(structure any)
map[string]interface{} { func RespondJSON(ctx *web.Controller, status int, success bool, message string, data interface{}) { ctx.Ctx.Output.SetStatus(status) - ctx.Data["json"] = models.JSONResponse{ + ctx.Data["json"] = dto.JSONResponse{ Success: success, Message: message, Data: data, @@ -44,11 +42,14 @@ func RespondJSON(ctx *web.Controller, status int, success bool, message string, _ = ctx.ServeJSON() } -func SuccessResponse(ctx *web.Controller, data interface{}) { - RespondJSON(ctx, http.StatusOK, true, "success", data) +func SuccessResponse(ctx *web.Controller, message string, data interface{}) { + RespondJSON(ctx, http.StatusOK, true, message, data) } -func ErrorResponse(ctx *web.Controller, status int, message string) { +func ErrorResponse(ctx *web.Controller, status int, message string, err error) { + if err != nil { + logger.Errorf("error in request %s: %s", ctx.Ctx.Input.URI(), err) + } RespondJSON(ctx, status, false, message, nil) } @@ -117,7 +118,7 @@ func ULID() string { t := time.Now() newUlid, err := ulid.New(ulid.Timestamp(t), entropy) if err != nil { - logs.Critical(err) + logger.Fatal(err) } return newUlid.String() @@ -134,7 +135,7 @@ func Ternary(cond bool, a, b any) any { func CreateDirectory(dirPath string, perm os.FileMode) error { if _, err := os.Stat(dirPath); os.IsNotExist(err) { if err := os.MkdirAll(dirPath, perm); err != nil { - return fmt.Errorf("failed to create directory %s: %v", dirPath, err) + return fmt.Errorf("failed to create directory %s: %s", dirPath, err) } } return nil @@ -148,26 +149,11 @@ func WriteFile(filePath string, data []byte, perm os.FileMode) error { } if err := os.WriteFile(filePath, data, perm); err != nil { - return fmt.Errorf("failed to write to file %s: %v", filePath, err) + return fmt.Errorf("failed to write to file %s: %s", filePath, err) } return nil } -// ParseJSONFile parses a JSON file into a map -func ParseJSONFile(filePath string) (map[string]interface{}, error) { - fileData, err := os.ReadFile(filePath) - if err != nil { - return nil, fmt.Errorf("failed to read file %s: %v", filePath, err) - } - - var result map[string]interface{} - if err := json.Unmarshal(fileData, &result); err != nil { - return nil, fmt.Errorf("failed to parse JSON from file %s: %v", filePath, err) - } - - return result, nil -} - // ToCron converts a frequency string to a cron expression func ToCron(frequency string) string { parts := strings.Split(strings.ToLower(frequency), "-") @@ -201,75 +187,6 @@ func ToCron(frequency string) string { } } -func CleanOldLogs(logDir string, retentionPeriod int) { - logs.Info("Running log cleaner...") - cutoff := time.Now().AddDate(0, 0, -retentionPeriod) - - // check if old logs are present - shouldDelete := func(path string, cutoff time.Time) bool { - entries, _ := os.ReadDir(path) - if len(entries) == 0 { - return true - } - - var foundOldLog bool - _ = filepath.Walk(path, func(filePath string, info os.FileInfo, _ error) error { - if info == nil || info.IsDir() { - return nil - } - if (strings.HasSuffix(filePath, ".log") || strings.HasSuffix(filePath, ".log.gz")) && - info.ModTime().Before(cutoff) { - foundOldLog = true - return filepath.SkipDir - } - return nil - }) - return foundOldLog - } - - entries, err := os.ReadDir(logDir) - if err != nil { - logs.Error("failed to read log dir: %v", err) - return - } - // delete dir if old logs are found or is empty - for _, entry := range entries { - if !entry.IsDir() || entry.Name() == "telemetry" { - continue - } - dirPath := filepath.Join(logDir, entry.Name()) - if toDelete 
:= shouldDelete(dirPath, cutoff); toDelete { - logs.Info("Deleting folder: %s", dirPath) - _ = os.RemoveAll(dirPath) - } - } -} - -// starts a log cleaner that removes old logs from the specified directory based on the retention period -func InitLogCleaner(logDir string, retentionPeriod int) { - logs.Info("Log cleaner started...") - CleanOldLogs(logDir, retentionPeriod) // catchup missed cycles if any - c := cron.New() - err := c.AddFunc("@midnight", func() { - CleanOldLogs(logDir, retentionPeriod) - }) - if err != nil { - logs.Error("Failed to start log cleaner: %v", err) - return - } - c.Start() -} - -// GetRetentionPeriod returns the retention period for logs -func GetLogRetentionPeriod() int { - if val := os.Getenv("LOG_RETENTION_PERIOD"); val != "" { - if retentionPeriod, err := strconv.Atoi(val); err == nil && retentionPeriod > 0 { - return retentionPeriod - } - } - return constants.DefaultLogRetentionPeriod -} - // ExtractJSON extracts and returns the last valid JSON block from output func ExtractJSON(output string) (map[string]interface{}, error) { outputStr := strings.TrimSpace(output) @@ -300,3 +217,100 @@ func ExtractJSON(output string) (map[string]interface{}, error) { return nil, fmt.Errorf("no valid JSON block found in output") } + +// LogEntry represents a log entry +type LogEntry struct { + Level string `json:"level"` + Time time.Time `json:"time"` + Message json.RawMessage `json:"message"` // store raw JSON +} + +// ReadLogs reads logs from the given mainLogDir and returns structured log entries. +func ReadLogs(mainLogDir string) ([]map[string]interface{}, error) { + // Check if mainLogDir exists + if _, err := os.Stat(mainLogDir); os.IsNotExist(err) { + return nil, fmt.Errorf("logs directory not found: %s", mainLogDir) + } + + // Logs directory + logsDir := filepath.Join(mainLogDir, "logs") + if _, err := os.Stat(logsDir); os.IsNotExist(err) { + return nil, fmt.Errorf("logs directory not found: %s", logsDir) + } + + files, err := os.ReadDir(logsDir) + if err != nil || len(files) == 0 { + return nil, fmt.Errorf("logs directory empty in: %s", logsDir) + } + + logDir := filepath.Join(logsDir, files[0].Name()) + logPath := filepath.Join(logDir, "olake.log") + logContent, err := os.ReadFile(logPath) + if err != nil { + return nil, fmt.Errorf("failed to read log file: %s", logPath) + } + + var parsedLogs []map[string]interface{} + lines := strings.Split(string(logContent), "\n") + + for _, line := range lines { + if line == "" { + continue + } + + var logEntry LogEntry + if err := json.Unmarshal([]byte(line), &logEntry); err != nil { + continue + } + + if logEntry.Level == "debug" { + continue + } + + // Convert Message to string safely + var messageStr string + var tmp interface{} + if err := json.Unmarshal(logEntry.Message, &tmp); err == nil { + switch v := tmp.(type) { + case string: + messageStr = v // plain string + default: + msgBytes, _ := json.Marshal(v) // object/array + messageStr = string(msgBytes) + } + } else { + // fallback: raw bytes as string + messageStr = string(logEntry.Message) + } + + parsedLogs = append(parsedLogs, map[string]interface{}{ + "level": logEntry.Level, + "time": logEntry.Time.UTC().Format(time.RFC3339), + "message": messageStr, + }) + } + + return parsedLogs, nil +} + +// RetryWithBackoff retries a function with exponential backoff +func RetryWithBackoff(fn func() error, maxRetries int, initialDelay time.Duration) error { + delay := initialDelay + var errMsg error + + for retry := 0; retry < maxRetries; retry++ { + if err := fn(); err != nil { 
+ errMsg = err + if retry < maxRetries-1 { + logger.Warnf("Retry attempt %d/%d failed: %s. Retrying in %v...", retry+1, maxRetries, err, delay) + time.Sleep(delay) + delay *= 2 + continue + } + } else { + return nil + } + } + + return fmt.Errorf("failed after %d retries: %s", maxRetries, errMsg) +} diff --git a/ui/package.json b/ui/package.json index 24407fc4..ad7531d1 100644 --- a/ui/package.json +++ b/ui/package.json @@ -14,7 +14,10 @@ "lint:fix": "eslint \"**/*.{js,jsx,ts,tsx}\" --max-warnings 0 --fix", "preview": "vite preview", "format": "prettier --write \"**/*.{js,jsx,ts,tsx}\"", - "format:check": "prettier --check \"**/*.{js,jsx,ts,tsx}\"" + "format:check": "prettier --check \"**/*.{js,jsx,ts,tsx}\"", + "test": "playwright test", + "test:headed": "playwright test --headed", + "test:codegen": "playwright codegen http://localhost:8000/login" }, "dependencies": { "@babel/runtime": "7.26.10", @@ -36,6 +39,7 @@ }, "devDependencies": { "@eslint/js": "^9.19.0", + "@playwright/test": "^1.55.0", "@types/json-schema": "^7.0.15", "@types/node": "^22.13.10", "@types/react": "^18.2.62", diff --git a/ui/playwright.config.ts b/ui/playwright.config.ts new file mode 100644 index 00000000..88062caa --- /dev/null +++ b/ui/playwright.config.ts @@ -0,0 +1,36 @@ +import { defineConfig, devices } from "@playwright/test" + +//TODO: Decide on Timeouts and adjust them as needed + +// Constants +export const TIMEOUTS = { + LONG: 5 * 60 * 1000, // 5 minutes + SHORT: 10 * 1000, // 10 seconds +} as const + +export default defineConfig({ + testDir: "./tests", + timeout: 10 * 60 * 1000, // 10 minutes global test timeout + expect: { + timeout: 5 * 60 * 1000, // 5 minutes for expect assertions + }, + use: { + baseURL: "http://localhost:8000", + screenshot: "only-on-failure", + actionTimeout: 5 * 60 * 1000, // 5 minutes for actions + navigationTimeout: 30 * 1000, // 30 seconds for navigation + }, + projects: [ + // Setup project - runs first to create authenticated state + { + name: "setup", + testMatch: /.*\.setup\.ts/, + }, + // Main test project - depends on setup + { + name: "chromium", + use: { ...devices["Desktop Chrome"] }, + dependencies: ["setup"], + }, + ], +}) diff --git a/ui/pnpm-lock.yaml b/ui/pnpm-lock.yaml index 7c30b8ee..402d006c 100644 --- a/ui/pnpm-lock.yaml +++ b/ui/pnpm-lock.yaml @@ -64,6 +64,9 @@ importers: '@eslint/js': specifier: ^9.19.0 version: 9.35.0 + '@playwright/test': + specifier: ^1.55.0 + version: 1.55.1 '@types/json-schema': specifier: ^7.0.15 version: 7.0.15 @@ -521,6 +524,11 @@ packages: resolution: {integrity: sha512-QNqXyfVS2wm9hweSYD2O7F0G06uurj9kZ96TRQE5Y9hU7+tgdZwIkbAKc5Ocy1HxEY2kuDQa6cQ1WRs/O5LFKA==} engines: {node: ^12.20.0 || ^14.18.0 || >=16.0.0} + '@playwright/test@1.55.1': + resolution: {integrity: sha512-IVAh/nOJaw6W9g+RJVlIQJ6gSiER+ae6mKQ5CX1bERzQgbC1VSeBlwdvczT7pxb0GWiyrxH4TGKbMfDb4Sq/ig==} + engines: {node: '>=18'} + hasBin: true + '@rc-component/async-validator@5.0.4': resolution: {integrity: sha512-qgGdcVIF604M9EqjNF0hbUTz42bz/RDtxWdWuU5EQe3hi7M8ob54B6B35rOsvX5eSvIHIzT9iH1R3n+hk3CGfg==} engines: {node: '>=14.x'} @@ -1309,6 +1317,11 @@ packages: fraction.js@4.3.7: resolution: {integrity: sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==} + fsevents@2.3.2: + resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + fsevents@2.3.3: resolution: {integrity: 
sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} @@ -1785,6 +1798,16 @@ packages: resolution: {integrity: sha512-TfySrs/5nm8fQJDcBDuUng3VOUKsd7S+zqvbOTiGXHfxX4wK31ard+hoNuvkicM/2YFzlpDgABOevKSsB4G/FA==} engines: {node: '>= 6'} + playwright-core@1.55.1: + resolution: {integrity: sha512-Z6Mh9mkwX+zxSlHqdr5AOcJnfp+xUWLCt9uKV18fhzA8eyxUd8NUWzAjxUh55RZKSYwDGX0cfaySdhZJGMoJ+w==} + engines: {node: '>=18'} + hasBin: true + + playwright@1.55.1: + resolution: {integrity: sha512-cJW4Xd/G3v5ovXtJJ52MAOclqeac9S/aGGgRzLabuF8TnIb6xHvMzKIa6JmrRzUkeXJgfL1MhukP0NK6l39h3A==} + engines: {node: '>=18'} + hasBin: true + possible-typed-array-names@1.1.0: resolution: {integrity: sha512-/+5VFTchJDoVj3bhoqi6UeymcD00DAwb1nJwamzPvHEszJ4FpF6SNNbUbOS8yI56qHzdV8eK0qEfOSiodkTdxg==} engines: {node: '>= 0.4'} @@ -2952,6 +2975,10 @@ snapshots: '@pkgr/core@0.2.9': {} + '@playwright/test@1.55.1': + dependencies: + playwright: 1.55.1 + '@rc-component/async-validator@5.0.4': dependencies: '@babel/runtime': 7.26.10 @@ -3963,6 +3990,9 @@ snapshots: fraction.js@4.3.7: {} + fsevents@2.3.2: + optional: true + fsevents@2.3.3: optional: true @@ -4421,6 +4451,14 @@ snapshots: pirates@4.0.7: {} + playwright-core@1.55.1: {} + + playwright@1.55.1: + dependencies: + playwright-core: 1.55.1 + optionalDependencies: + fsevents: 2.3.2 + possible-typed-array-names@1.1.0: {} postcss-import@15.1.0(postcss@8.5.6): diff --git a/ui/public/vite.svg b/ui/public/vite.svg deleted file mode 100644 index e7b8dfb1..00000000 --- a/ui/public/vite.svg +++ /dev/null @@ -1 +0,0 @@ - \ No newline at end of file diff --git a/ui/src/api/axios.ts b/ui/src/api/axios.ts index 84c3da3c..42da4a93 100644 --- a/ui/src/api/axios.ts +++ b/ui/src/api/axios.ts @@ -9,6 +9,16 @@ import { HTTP_STATUS, LOCALSTORAGE_TOKEN_KEY, } from "../utils/constants" +import { notificationService } from "./services/notificationService" +/** + * Extend Axios types to support our custom notification flag + */ +declare module "axios" { + export interface AxiosRequestConfig { + showNotification?: boolean // Controls whether the interceptor shows a toast (default: false) + disableErrorNotification?: boolean + } +} /** * Creates and configures an axios instance with default settings @@ -44,9 +54,40 @@ api.interceptors.request.use( */ api.interceptors.response.use( (response: AxiosResponse) => { + const config = response.config + const payload = response.data + + // Show toast only if explicitly enabled for this request + if (config.showNotification === true) { + notificationService.success(payload.message) + } + + // Return only the actual data to the caller (unwrap the envelope) + response.data = payload.data + return response }, (error: AxiosError) => { + const payload = error.response?.data as any + const config = error.config + + // Skip showing errors for canceled requests + if ( + axios.isCancel(error) || + error.code === "ERR_CANCELED" || + config?.disableErrorNotification + ) { + return Promise.reject(error) + } + + // Always show error toasts + if (payload.message) { + notificationService.error( + payload.message || "An error occurred! 
Please try again.", + ) + } + + // Handle specific HTTP status codes if (error.response) { const { status } = error.response diff --git a/ui/src/api/services/analyticsService.ts b/ui/src/api/services/analyticsService.ts index e3212c94..71d8b864 100644 --- a/ui/src/api/services/analyticsService.ts +++ b/ui/src/api/services/analyticsService.ts @@ -1,6 +1,11 @@ +/** + * AnalyticsService handles sending analytics events + */ + import api from "../axios" import axios from "axios" +// endpoint which handles rate limiting and forwards the events to mixpanel const ANALYTICS_ENDPOINT = "https://analytics.olake.io/mp/track" const sendAnalyticsEvent = async ( @@ -57,10 +62,11 @@ const getSystemInfo = async () => { } } +// returns a unique user id for the user to track them across sessions const getTelemetryID = async (): Promise => { try { const response = await api.get("/telemetry-id") - return response.data.data.user_id || "" + return response.data.user_id || "" } catch (error) { console.error("Error fetching telemetry ID:", error) return "" @@ -77,6 +83,7 @@ export const trackEvent = async ( return } + // if user is already logged in we'll get the username from local storage const username = localStorage.getItem("username") const systemInfo = await getSystemInfo() diff --git a/ui/src/api/services/authService.ts b/ui/src/api/services/authService.ts index f14b6b1c..0f8ee1ce 100644 --- a/ui/src/api/services/authService.ts +++ b/ui/src/api/services/authService.ts @@ -2,7 +2,7 @@ * AuthService handles authentication-related API calls and localStorage management. */ import api from "../axios" -import { APIResponse, LoginArgs, LoginResponse } from "../../types" +import { LoginArgs, LoginResponse } from "../../types" import { LOCALSTORAGE_TOKEN_KEY, LOCALSTORAGE_USERNAME_KEY, @@ -11,7 +11,7 @@ import { export const authService = { login: async ({ username, password }: LoginArgs) => { try { - const response = await api.post>( + const response = await api.post( "/login", { username, @@ -24,25 +24,11 @@ export const authService = { }, ) - if (response.data.success) { - localStorage.setItem( - LOCALSTORAGE_USERNAME_KEY, - response.data.data.username, - ) - localStorage.setItem(LOCALSTORAGE_TOKEN_KEY, "authenticated") - return response.data.data - } - - throw new Error(response.data.message) + localStorage.setItem(LOCALSTORAGE_USERNAME_KEY, response.data.username) + localStorage.setItem(LOCALSTORAGE_TOKEN_KEY, "authenticated") + return response.data } catch (error: any) { - // Handle 400 status code specifically - if (error.response?.status === 400) { - throw new Error(error.response.data.message || "Invalid credentials") - } - // Handle other errors - throw new Error( - error.response?.data?.message || error.message || "Login failed", - ) + throw error } }, diff --git a/ui/src/api/services/destinationService.ts b/ui/src/api/services/destinationService.ts index 09afd4f9..7ab594e3 100644 --- a/ui/src/api/services/destinationService.ts +++ b/ui/src/api/services/destinationService.ts @@ -1,7 +1,6 @@ import api from "../axios" import { API_CONFIG } from "../config" import { - APIResponse, Entity, EntityBase, EntityTestRequest, @@ -11,8 +10,9 @@ import { getConnectorInLowerCase } from "../../utils/utils" // TODO: Make it parquet on all places const normalizeDestinationType = (type: string): string => { + //destination connector typemap const typeMap: Record = { - "amazon s3": "s3", + "amazon s3": "parquet", "apache iceberg": "iceberg", } return typeMap[type.toLowerCase()] || type.toLowerCase() @@ -21,10 +21,10 @@ 
const normalizeDestinationType = (type: string): string => { export const destinationService = { getDestinations: async () => { try { - const response = await api.get>( + const response = await api.get( API_CONFIG.ENDPOINTS.DESTINATIONS(API_CONFIG.PROJECT_ID), ) - const destinations: Entity[] = response.data.data.map(item => { + const destinations: Entity[] = response.data.map(item => { const config = JSON.parse(item.config) return { ...item, @@ -52,7 +52,7 @@ export const destinationService = { updateDestination: async (id: string, destination: EntityBase) => { try { - const response = await api.put>( + const response = await api.put( `${API_CONFIG.ENDPOINTS.DESTINATIONS(API_CONFIG.PROJECT_ID)}/${id}`, { name: destination.name, @@ -63,6 +63,7 @@ export const destinationService = { ? destination.config : JSON.stringify(destination.config), }, + { showNotification: true }, ) return response.data } catch (error) { @@ -74,6 +75,7 @@ export const destinationService = { deleteDestination: async (id: number) => { await api.delete( `${API_CONFIG.ENDPOINTS.DESTINATIONS(API_CONFIG.PROJECT_ID)}/${id}`, + { showNotification: true }, ) return }, @@ -84,7 +86,7 @@ export const destinationService = { source_version: string = "", ) => { try { - const response = await api.post>( + const response = await api.post( `${API_CONFIG.ENDPOINTS.DESTINATIONS(API_CONFIG.PROJECT_ID)}/test`, { type: getConnectorInLowerCase(destination.type), @@ -93,12 +95,13 @@ export const destinationService = { source_type: source_type, source_version: source_version, }, - { timeout: 0 }, + //timeout is 0 as test connection takes more time as it needs to connect to the destination + { timeout: 0, disableErrorNotification: true }, ) return { - success: response.data.success, - message: response.data.message, - data: response.data.data, + success: true, + message: "success", + data: response.data, } } catch (error) { console.error("Error testing destination connection:", error) @@ -111,7 +114,7 @@ export const destinationService = { }, getDestinationVersions: async (type: string) => { - const response = await api.get>( + const response = await api.get<{ version: string[] }>( `${API_CONFIG.ENDPOINTS.DESTINATIONS(API_CONFIG.PROJECT_ID)}/versions/?type=${type}`, { timeout: 0, @@ -128,7 +131,7 @@ export const destinationService = { signal?: AbortSignal, ) => { const normalizedType = normalizeDestinationType(type) - const response = await api.post>( + const response = await api.post( `${API_CONFIG.ENDPOINTS.DESTINATIONS(API_CONFIG.PROJECT_ID)}/spec`, { type: normalizedType, @@ -136,7 +139,8 @@ export const destinationService = { source_type: source_type, source_version: source_version, }, - { timeout: 300000, signal }, + //timeout is 300000 as spec takes more time as it needs to fetch the spec from the destination + { timeout: 300000, signal, disableErrorNotification: true }, ) return response.data }, diff --git a/ui/src/api/services/jobService.ts b/ui/src/api/services/jobService.ts index 7d392727..40bfa184 100644 --- a/ui/src/api/services/jobService.ts +++ b/ui/src/api/services/jobService.ts @@ -1,15 +1,15 @@ import api from "../axios" import { API_CONFIG } from "../config" -import { APIResponse, Job, JobBase, JobTask, TaskLog } from "../../types" +import { Job, JobBase, JobTask, TaskLog } from "../../types" export const jobService = { getJobs: async (): Promise => { try { - const response = await api.get>( + const response = await api.get( API_CONFIG.ENDPOINTS.JOBS(API_CONFIG.PROJECT_ID), ) - return response.data.data + return 
response.data } catch (error) { console.error("Error fetching jobs from API:", error) throw error @@ -34,6 +34,7 @@ export const jobService = { const response = await api.put( `${API_CONFIG.ENDPOINTS.JOBS(API_CONFIG.PROJECT_ID)}/${id}`, job, + { showNotification: true }, ) return response.data } catch (error) { @@ -46,6 +47,7 @@ export const jobService = { try { await api.delete( `${API_CONFIG.ENDPOINTS.JOBS(API_CONFIG.PROJECT_ID)}/${id}`, + { showNotification: true }, ) } catch (error) { console.error("Error deleting job:", error) @@ -55,10 +57,11 @@ export const jobService = { cancelJob: async (id: string): Promise => { try { - const response = await api.get>( + const response = await api.get( `${API_CONFIG.ENDPOINTS.JOBS(API_CONFIG.PROJECT_ID)}/${id}/cancel`, + { showNotification: true }, ) - return response.data.data.message + return response.data.message } catch (error) { console.error("Error canceling job:", error) throw error @@ -67,10 +70,10 @@ export const jobService = { syncJob: async (id: string): Promise => { try { - const response = await api.post>( + const response = await api.post( `${API_CONFIG.ENDPOINTS.JOBS(API_CONFIG.PROJECT_ID)}/${id}/sync`, {}, - { timeout: 0 }, // Disable timeout for this request since it can take longer + { timeout: 0, showNotification: true }, // Disable timeout for this request since it can take longer ) return response.data } catch (error) { @@ -79,11 +82,11 @@ export const jobService = { } }, - getJobTasks: async (id: string): Promise> => { + getJobTasks: async (id: string): Promise => { try { - const response = await api.get>( + const response = await api.get( `${API_CONFIG.ENDPOINTS.JOBS(API_CONFIG.PROJECT_ID)}/${id}/tasks`, - { timeout: 0 }, // Disable timeout for this request + { timeout: 0, showNotification: true }, // Disable timeout for this request, no toast for fetching tasks ) return response.data } catch (error) { @@ -96,12 +99,12 @@ export const jobService = { jobId: string, taskId: string, filePath: string, - ): Promise> => { + ): Promise => { try { - const response = await api.post>( + const response = await api.post( `${API_CONFIG.ENDPOINTS.JOBS(API_CONFIG.PROJECT_ID)}/${jobId}/tasks/${taskId}/logs`, { file_path: filePath }, - { timeout: 0 }, + { timeout: 0, showNotification: true }, // Disable timeout for this request since it can take longer, no toast for logs ) return response.data } catch (error) { @@ -110,14 +113,13 @@ export const jobService = { } }, - activateJob: async ( - jobId: string, - activate: boolean, - ): Promise> => { + //This either pauses or resumes the job + activateJob: async (jobId: string, activate: boolean): Promise => { try { - const response = await api.post>( + const response = await api.post( `${API_CONFIG.ENDPOINTS.JOBS(API_CONFIG.PROJECT_ID)}/${jobId}/activate`, { activate }, + { showNotification: true }, ) return response.data } catch (error) { @@ -128,11 +130,11 @@ export const jobService = { checkJobNameUnique: async (jobName: string): Promise<{ unique: boolean }> => { try { - const response = await api.post>( + const response = await api.post<{ unique: boolean }>( `${API_CONFIG.ENDPOINTS.JOBS(API_CONFIG.PROJECT_ID)}/check-unique`, { job_name: jobName }, ) - return response.data.data + return response.data } catch (error) { console.error("Error checking job name uniqueness:", error) throw error diff --git a/ui/src/api/services/notificationService.ts b/ui/src/api/services/notificationService.ts new file mode 100644 index 00000000..a5f1b081 --- /dev/null +++ 
b/ui/src/api/services/notificationService.ts @@ -0,0 +1,19 @@ +import { message } from "antd" + +/** + * A decoupled service for showing UI notifications. + */ +export const notificationService = { + success: (msg: string) => { + if (msg) { + message.destroy() + message.success(msg) + } + }, + error: (msg: string) => { + if (msg) { + message.destroy() + message.error(msg) + } + }, +} diff --git a/ui/src/api/services/sourceService.ts b/ui/src/api/services/sourceService.ts index 3f4d3a10..a267cd4c 100644 --- a/ui/src/api/services/sourceService.ts +++ b/ui/src/api/services/sourceService.ts @@ -2,7 +2,6 @@ import api from "../axios" import { API_CONFIG } from "../config" import { Entity, - APIResponse, EntityBase, EntityTestRequest, EntityTestResponse, @@ -11,11 +10,11 @@ import { export const sourceService = { getSources: async (): Promise => { try { - const response = await api.get>( + const response = await api.get( API_CONFIG.ENDPOINTS.SOURCES(API_CONFIG.PROJECT_ID), ) - return response.data.data.map(item => ({ + return response.data.map(item => ({ ...item, config: JSON.parse(item.config), })) @@ -27,7 +26,7 @@ export const sourceService = { createSource: async (source: EntityBase) => { try { - const response = await api.post>( + const response = await api.post( API_CONFIG.ENDPOINTS.SOURCES(API_CONFIG.PROJECT_ID), source, ) @@ -40,7 +39,7 @@ export const sourceService = { updateSource: async (id: string, source: EntityBase) => { try { - const response = await api.put>( + const response = await api.put( `${API_CONFIG.ENDPOINTS.SOURCES(API_CONFIG.PROJECT_ID)}/${id}`, { name: source.name, @@ -51,6 +50,7 @@ export const sourceService = { ? source.config : JSON.stringify(source.config), }, + { showNotification: true }, ) return response.data } catch (error) { @@ -63,6 +63,7 @@ export const sourceService = { try { await api.delete( `${API_CONFIG.ENDPOINTS.SOURCES(API_CONFIG.PROJECT_ID)}/${id}`, + { showNotification: true }, ) } catch (error) { console.error("Error deleting source:", error) @@ -72,19 +73,19 @@ export const sourceService = { testSourceConnection: async (source: EntityTestRequest) => { try { - const response = await api.post>( + const response = await api.post( `${API_CONFIG.ENDPOINTS.SOURCES(API_CONFIG.PROJECT_ID)}/test`, { type: source.type.toLowerCase(), version: source.version, config: source.config, }, - { timeout: 0 }, + { timeout: 0, disableErrorNotification: true }, // Disable timeout for this request since it can take longer ) return { - success: response.data.success, - message: response.data.message, - data: response.data.data, + success: true, + message: "success", + data: response.data, } } catch (error) { console.error("Error testing source connection:", error) @@ -98,10 +99,10 @@ export const sourceService = { getSourceVersions: async (type: string) => { try { - const response = await api.get>( + const response = await api.get<{ version: string[] }>( `${API_CONFIG.ENDPOINTS.SOURCES(API_CONFIG.PROJECT_ID)}/versions/?type=${type}`, { - timeout: 0, + timeout: 0, // Disable timeout for this request since it can take longer }, ) return response.data @@ -117,13 +118,13 @@ export const sourceService = { signal?: AbortSignal, ) => { try { - const response = await api.post>( + const response = await api.post( `${API_CONFIG.ENDPOINTS.SOURCES(API_CONFIG.PROJECT_ID)}/spec`, { type: type.toLowerCase(), version, }, - { timeout: 300000, signal }, + { timeout: 300000, signal, disableErrorNotification: true }, //timeout is 300000 as spec takes more time as it needs to fetch the spec 
from olake ) return response.data } catch (error) { @@ -132,6 +133,7 @@ export const sourceService = { } }, + //fetches source specific streams getSourceStreams: async ( name: string, type: string, @@ -141,7 +143,7 @@ export const sourceService = { job_id?: number, ) => { try { - const response = await api.post>>( + const response = await api.post>( `${API_CONFIG.ENDPOINTS.SOURCES(API_CONFIG.PROJECT_ID)}/streams`, { name, diff --git a/ui/src/modules/auth/pages/Login.tsx b/ui/src/modules/auth/pages/Login.tsx index 7682fb55..e1cedc18 100644 --- a/ui/src/modules/auth/pages/Login.tsx +++ b/ui/src/modules/auth/pages/Login.tsx @@ -1,7 +1,7 @@ import { useState } from "react" import { useNavigate } from "react-router-dom" import { Form, Input, Button, Card, message } from "antd" -import { User, LockKey } from "@phosphor-icons/react" +import { UserIcon, LockKeyIcon } from "@phosphor-icons/react" import { useAppStore } from "../../../store" import { LoginArgs } from "../../../types" @@ -25,6 +25,7 @@ const Login: React.FC = () => { duration: 3, className: "font-medium", }) + // clear form in the case of error form.resetFields() } setLoading(false) @@ -56,7 +57,7 @@ const Login: React.FC = () => { > { > { centered >
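The Playwright setup added earlier in this patch (the `test` scripts in `ui/package.json`, `ui/playwright.config.ts`, and the git-ignored `ui/tests/.auth/` directory) implies specs living under `ui/tests` that reuse a logged-in session produced by the `setup` project. A minimal sketch of such a spec; the file name, route, and storage-state path are assumptions rather than part of the patch:

```ts
// ui/tests/jobs.spec.ts — illustrative only; path, route and storage-state file are assumptions
import { test, expect } from "@playwright/test"

// Reuse the session the "setup" project is expected to persist after logging in at /login
test.use({ storageState: "tests/.auth/user.json" })

test("authenticated user can open the jobs list", async ({ page }) => {
	// Relative URLs resolve against baseURL (http://localhost:8000) from playwright.config.ts
	await page.goto("/jobs")
	await expect(page).toHaveURL(/jobs/)
})
```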
- diff --git a/ui/src/modules/common/Modals/ClearDestinationAndSyncModal.tsx b/ui/src/modules/common/Modals/ClearDestinationAndSyncModal.tsx index 189083f1..71f9c3be 100644 --- a/ui/src/modules/common/Modals/ClearDestinationAndSyncModal.tsx +++ b/ui/src/modules/common/Modals/ClearDestinationAndSyncModal.tsx @@ -1,5 +1,5 @@ import { useNavigate } from "react-router-dom" -import { Warning } from "@phosphor-icons/react" +import { WarningIcon } from "@phosphor-icons/react" import { Button, message, Modal } from "antd" import { useAppStore } from "../../../store" @@ -18,7 +18,7 @@ const ClearDestinationAndSyncModal = () => { centered >
- diff --git a/ui/src/modules/common/Modals/DeleteJobModal.tsx b/ui/src/modules/common/Modals/DeleteJobModal.tsx index 8ac1513f..93a60c62 100644 --- a/ui/src/modules/common/Modals/DeleteJobModal.tsx +++ b/ui/src/modules/common/Modals/DeleteJobModal.tsx @@ -1,6 +1,6 @@ import { useNavigate } from "react-router-dom" -import { Button, message, Modal } from "antd" -import { Warning } from "@phosphor-icons/react" +import { Button, Modal } from "antd" +import { WarningIcon } from "@phosphor-icons/react" import { useAppStore } from "../../../store" @@ -25,7 +25,7 @@ const DeleteJobModal = ({ centered >
- @@ -41,10 +41,7 @@ const DeleteJobModal = ({ onClick={() => { setShowDeleteJobModal(false) if (selectedJobId) { - deleteJob(selectedJobId).catch(error => { - message.error("Failed to delete job") - console.error(error) - }) + deleteJob(selectedJobId) } if (fromJobSettings) { setTimeout(() => { diff --git a/ui/src/modules/common/Modals/DeleteModal.tsx b/ui/src/modules/common/Modals/DeleteModal.tsx index 696a51c3..7319b686 100644 --- a/ui/src/modules/common/Modals/DeleteModal.tsx +++ b/ui/src/modules/common/Modals/DeleteModal.tsx @@ -1,12 +1,13 @@ import { formatDistanceToNow } from "date-fns" import { Button, message, Modal, Table } from "antd" -import { Warning } from "@phosphor-icons/react" +import { WarningIcon } from "@phosphor-icons/react" import { useAppStore } from "../../../store" import { Entity } from "../../../types" import { DeleteModalProps } from "../../../types/modalTypes" import { getConnectorImage } from "../../../utils/utils" +//Entity Delete Modal const DeleteModal = ({ fromSource }: DeleteModalProps) => { const { showDeleteModal, @@ -34,20 +35,12 @@ const DeleteModal = ({ fromSource }: DeleteModalProps) => { const handleDeleteSource = () => { message.info(`Deleting source ${selectedSource?.name}`) - deleteSource(selectedSource?.id as unknown as string).catch(error => { - message.error("Failed to delete source") - console.error(error) - }) + deleteSource(selectedSource?.id as unknown as string) setShowDeleteModal(false) } const handleDeleteDestination = () => { message.info(`Deleting destination ${selectedDestination?.name}`) - deleteDestination(selectedDestination?.id as unknown as string).catch( - error => { - message.error("Failed to delete destination") - console.error(error) - }, - ) + deleteDestination(selectedDestination?.id as unknown as string) setShowDeleteModal(false) } @@ -137,7 +130,7 @@ const DeleteModal = ({ fromSource }: DeleteModalProps) => { width={600} >
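The `.catch(... message.error(...))` wrappers removed from `DeleteJobModal` above (and from `DeleteModal` just below) are redundant now that error toasts are raised centrally: the axios response interceptor shows an error toast unless a request sets `disableErrorNotification`, shows a success toast only when `showNotification` is set, and unwraps the `{ success, message, data }` envelope before handing `data` back to the caller. A rough sketch of that calling convention; the endpoint strings and service name are placeholders, not code from this patch:

```ts
// Illustrative only — the flags and envelope unwrapping come from ui/src/api/axios.ts in this patch
import api from "../axios"
import { notificationService } from "./notificationService"

export const exampleService = {
	rename: async (id: string, name: string) => {
		// Interceptor shows a success toast using the envelope's `message`;
		// `response.data` is already the unwrapped `data` field.
		const response = await api.put(
			`/api/v1/project/olake/examples/${id}`,
			{ name },
			{ showNotification: true },
		)
		return response.data
	},
	fetchSpec: async (type: string) => {
		// Long-running call: no timeout, and failures are surfaced by the caller rather than a global toast
		const response = await api.post(
			"/api/v1/project/olake/examples/spec",
			{ type },
			{ timeout: 0, disableErrorNotification: true },
		)
		return response.data
	},
	// Manual toast outside the interceptor, e.g. from a store action
	notifySaved: () => notificationService.success("Saved"),
}
```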
- diff --git a/ui/src/modules/common/Modals/DestinationDatabaseModal.tsx b/ui/src/modules/common/Modals/DestinationDatabaseModal.tsx index 03a92e86..20e51655 100644 --- a/ui/src/modules/common/Modals/DestinationDatabaseModal.tsx +++ b/ui/src/modules/common/Modals/DestinationDatabaseModal.tsx @@ -8,7 +8,7 @@ import { LABELS, NAMESPACE_PLACEHOLDER, } from "../../../utils/constants" -import { DotOutline } from "@phosphor-icons/react" +import { DotOutlineIcon } from "@phosphor-icons/react" import { DestinationDatabaseModalProps } from "../../../types" type FormatType = (typeof FORMAT_OPTIONS)[keyof typeof FORMAT_OPTIONS] @@ -177,7 +177,7 @@ const DestinationDatabaseModal = ({ key={index} className="flex items-center text-sm" > - diff --git a/ui/src/modules/common/Modals/EditSourceModal.tsx b/ui/src/modules/common/Modals/EditSourceModal.tsx deleted file mode 100644 index 2a492a0d..00000000 --- a/ui/src/modules/common/Modals/EditSourceModal.tsx +++ /dev/null @@ -1,199 +0,0 @@ -import { Button, Modal, Table, message } from "antd" -import { CheckCircle, Warning } from "@phosphor-icons/react" -import { formatDistanceToNow } from "date-fns" -import { useNavigate } from "react-router-dom" - -import { sourceService } from "../../../api" -import { useAppStore } from "../../../store" -import { getConnectorImage } from "../../../utils/utils" - -const EditSourceModal = () => { - const navigate = useNavigate() - const { - showEditSourceModal, - setShowEditSourceModal, - showSuccessModal, - setShowSuccessModal, - selectedSource, - updateSource, - setShowTestingModal, - setShowFailureModal, - setSourceTestConnectionError, - } = useAppStore() - - const getSourceData = () => { - const configStr = - typeof selectedSource?.config === "string" - ? selectedSource?.config - : JSON.stringify(selectedSource?.config) - - const sourceData = { - name: selectedSource?.name, - type: selectedSource?.type, - version: selectedSource?.version, - config: configStr, - } - return sourceData - } - - const handleEdit = async () => { - if (!selectedSource?.id) { - message.error("Source ID is missing") - return - } - - try { - setShowEditSourceModal(false) - setShowTestingModal(true) - const testResult = - await sourceService.testSourceConnection(getSourceData()) - - if (testResult.data?.status === "SUCCEEDED") { - setTimeout(() => { - setShowTestingModal(false) - setShowSuccessModal(true) - }, 1000) - - setTimeout(async () => { - setShowSuccessModal(false) - await updateSource(selectedSource.id.toString(), selectedSource) - navigate("/sources") - }, 2000) - } else { - setShowTestingModal(false) - setSourceTestConnectionError(testResult.data?.message || "") - setShowFailureModal(true) - } - } catch (error) { - message.error("Failed to update source") - console.error(error) - } - } - - return ( - <> - - -
- } - open={showEditSourceModal} - onCancel={() => setShowEditSourceModal(false)} - footer={[ - , - , - ]} - centered - width="38%" - > -
-

- Due to the editing, the jobs are going to get affected -

-

- Editing this source will affect the following jobs that are - associated with this source and as a result will fail immediately. - Do you still want to edit the source? -

-
-
- ( - - {activate ? "Active" : "Inactive"} - - ), - }, - { - title: "Last runtime", - dataIndex: "last_run_time", - key: "last_run_time", - render: (text: string) => ( - - {text !== undefined - ? formatDistanceToNow(new Date(text), { - addSuffix: true, - }) - : "-"} - - ), - }, - { - title: "Destination", - dataIndex: "destination_name", - key: "destination_name", - render: (destination_name: string, record: any) => ( -
- {record.destination_type} - {destination_name || "N/A"} -
- ), - }, - ]} - dataSource={selectedSource?.jobs} - pagination={false} - rowKey="key" - scroll={{ y: 300 }} - /> - - - - {/* Success Modal */} - -
- -
- Changes are saved successfully -
-
-
- - ) -} - -export default EditSourceModal diff --git a/ui/src/modules/common/Modals/EntityCancelModal.tsx b/ui/src/modules/common/Modals/EntityCancelModal.tsx index 3739fcb0..22e2f2ef 100644 --- a/ui/src/modules/common/Modals/EntityCancelModal.tsx +++ b/ui/src/modules/common/Modals/EntityCancelModal.tsx @@ -1,7 +1,11 @@ import React from "react" import { useNavigate } from "react-router-dom" import { Button, Modal } from "antd" -import { GitCommit, LinktreeLogo, Path } from "@phosphor-icons/react" +import { + GitCommitIcon, + LinktreeLogoIcon, + PathIcon, +} from "@phosphor-icons/react" import { useAppStore } from "../../../store" import { JOB_CREATION_STEPS } from "../../../utils/constants" @@ -28,11 +32,11 @@ const EntityCancelModal: React.FC = ({
{type === JOB_CREATION_STEPS.SOURCE ? ( - + ) : type === JOB_CREATION_STEPS.DESTINATION ? ( - + ) : ( - + )}
diff --git a/ui/src/modules/common/Modals/EntityEditModal.tsx b/ui/src/modules/common/Modals/EntityEditModal.tsx index 74d31bbe..0713e85a 100644 --- a/ui/src/modules/common/Modals/EntityEditModal.tsx +++ b/ui/src/modules/common/Modals/EntityEditModal.tsx @@ -1,13 +1,14 @@ import { useNavigate } from "react-router-dom" import { formatDistanceToNow } from "date-fns" import { Button, Modal, Table, message } from "antd" -import { InfoIcon, Warning } from "@phosphor-icons/react" +import { InfoIcon, WarningIcon } from "@phosphor-icons/react" import { useAppStore } from "../../../store" import { sourceService } from "../../../api" import { destinationService } from "../../../api/services/destinationService" import { EntityEditModalProps } from "../../../types" import { getConnectorImage } from "../../../utils/utils" +import { TEST_CONNECTION_STATUS } from "../../../utils/constants" const EntityEditModal = ({ entityType }: EntityEditModalProps) => { const navigate = useNavigate() @@ -69,7 +70,10 @@ const EntityEditModal = ({ entityType }: EntityEditModalProps) => { ? await sourceService.testSourceConnection(getEntityData()) : await destinationService.testDestinationConnection(getEntityData()) - if (testResult.data?.status === "SUCCEEDED") { + if ( + testResult.data?.connection_result.status === + TEST_CONNECTION_STATUS.SUCCEEDED + ) { setTimeout(() => { setShowTestingModal(false) setShowSuccessModal(true) @@ -78,16 +82,18 @@ const EntityEditModal = ({ entityType }: EntityEditModalProps) => { setTimeout(async () => { setShowSuccessModal(false) await updateEntity(selectedEntity.id.toString(), selectedEntity) - message.success(`${entityType} updated successfully`) navigate(navigatePath) }, 2000) } else { + const testConnectionError = { + message: testResult.data?.connection_result.message || "", + logs: testResult.data?.logs || [], + } setShowTestingModal(false) - setTestConnectionError(testResult.data?.message || "") + setTestConnectionError(testConnectionError) setShowFailureModal(true) } } catch (error) { - message.error(`Failed to update ${entityType}`) console.error(error) } } @@ -157,7 +163,7 @@ const EntityEditModal = ({ entityType }: EntityEditModalProps) => { - diff --git a/ui/src/modules/common/Modals/EntitySavedModal.tsx b/ui/src/modules/common/Modals/EntitySavedModal.tsx index fa646348..5d458c8f 100644 --- a/ui/src/modules/common/Modals/EntitySavedModal.tsx +++ b/ui/src/modules/common/Modals/EntitySavedModal.tsx @@ -1,5 +1,10 @@ import { useNavigate } from "react-router-dom" -import { Check, GitCommit, Path, LinktreeLogo } from "@phosphor-icons/react" +import { + CheckIcon, + GitCommitIcon, + PathIcon, + LinktreeLogoIcon, +} from "@phosphor-icons/react" import { Button, Modal } from "antd" import { useAppStore } from "../../../store" import { EntitySavedModalProps } from "../../../types" @@ -23,11 +28,11 @@ const EntitySavedModal: React.FC = ({
{type === "source" ? ( - + ) : type === JOB_CREATION_STEPS.STREAMS ? ( - + ) : ( - + )}
@@ -40,11 +45,11 @@ const EntitySavedModal: React.FC = ({
{type === "source" ? ( - + ) : type === JOB_CREATION_STEPS.STREAMS ? ( - + ) : ( - + )} {entityName || @@ -56,7 +61,7 @@ const EntitySavedModal: React.FC = ({
- + Success
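The `EntityEditModal` change above (mirrored later in `CreateDestination`) now branches on the richer test-connection payload: a `connection_result` object with `status`/`message`, plus `logs` for the failure modal. A condensed sketch of that branching; the wrapper function and import paths are illustrative, while the field names and `TEST_CONNECTION_STATUS` come from the patch:

```ts
// Condensed from the pattern in EntityEditModal/CreateDestination; wrapper and paths are illustrative
import { sourceService } from "../../../api"
import { TEST_CONNECTION_STATUS } from "../../../utils/constants"
import { EntityTestRequest } from "../../../types"

export const runSourceTest = async (
	sourceData: EntityTestRequest,
	onFailure: (error: { message: string; logs: unknown[] }) => void,
): Promise<boolean> => {
	const testResult = await sourceService.testSourceConnection(sourceData)

	if (testResult.data?.connection_result.status === TEST_CONNECTION_STATUS.SUCCEEDED) {
		return true // caller continues with the save / navigation flow
	}

	// Keep both the human-readable message and the structured logs for the failure modal
	onFailure({
		message: testResult.data?.connection_result.message || "",
		logs: testResult.data?.logs || [],
	})
	return false
}
```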
diff --git a/ui/src/modules/common/Modals/IngestionModeChangeModal.tsx b/ui/src/modules/common/Modals/IngestionModeChangeModal.tsx new file mode 100644 index 00000000..4a3ebab1 --- /dev/null +++ b/ui/src/modules/common/Modals/IngestionModeChangeModal.tsx @@ -0,0 +1,56 @@ +import { Button, Modal } from "antd" +import { useAppStore } from "../../../store" +import { IngestionModeChangeModalProps } from "../../../types/modalTypes" + +const IngestionModeChangeModal = ({ + onConfirm, + ingestionMode, +}: IngestionModeChangeModalProps) => { + const { showIngestionModeChangeModal, setShowIngestionModeChangeModal } = + useAppStore() + + return ( + +
+
+ Switch to {ingestionMode} for all tables?
+ +
+
+ All tables will be switched to {ingestionMode} mode.
+
+ You can still change the mode for individual tables.
+
+ +
+ + +
+
+
+ ) +} + +export default IngestionModeChangeModal diff --git a/ui/src/modules/common/Modals/SpecFailedModal.tsx b/ui/src/modules/common/Modals/SpecFailedModal.tsx new file mode 100644 index 00000000..01961bb9 --- /dev/null +++ b/ui/src/modules/common/Modals/SpecFailedModal.tsx @@ -0,0 +1,95 @@ +import { message, Modal } from "antd" +import { CopySimpleIcon } from "@phosphor-icons/react" + +import { useAppStore } from "../../../store" +import ErrorIcon from "../../../assets/ErrorIcon.svg" + +const SpecFailedModal = ({ + fromSource, + error, + onTryAgain, +}: { + fromSource: boolean + error: string + onTryAgain: () => void +}) => { + const { showSpecFailedModal, setShowSpecFailedModal } = useAppStore() + + const handleTryAgain = () => { + setShowSpecFailedModal(false) + onTryAgain() + } + + const handleCopyLogs = async () => { + try { + await navigator.clipboard.writeText(error) + message.success("Logs copied to clipboard!") + } catch { + message.error("Failed to copy logs") + } + } + + const handleClose = () => { + setShowSpecFailedModal(false) + } + + return ( + +
+
+
+ Error +
+
+
+

Failed

+

+ {fromSource ? "Source" : "Destination"} Spec Load Failed +

+
+
+
Error
+ +
+
+
+ {error} +
+
+
+
+
+ + +
+
+
+ ) +} + +export default SpecFailedModal diff --git a/ui/src/modules/common/Modals/TestConnectionFailureModal.tsx b/ui/src/modules/common/Modals/TestConnectionFailureModal.tsx index 402ca57b..5056cc10 100644 --- a/ui/src/modules/common/Modals/TestConnectionFailureModal.tsx +++ b/ui/src/modules/common/Modals/TestConnectionFailureModal.tsx @@ -1,9 +1,12 @@ +import { useState } from "react" import { useNavigate } from "react-router-dom" -import { Modal } from "antd" -import { Info } from "@phosphor-icons/react" +import { message, Modal } from "antd" +import { CopySimpleIcon } from "@phosphor-icons/react" +import clsx from "clsx" import { useAppStore } from "../../../store" import ErrorIcon from "../../../assets/ErrorIcon.svg" +import { getLogTextColor, getLogLevelClass } from "../../../utils/utils" const TestConnectionFailureModal = ({ fromSources, @@ -16,14 +19,17 @@ const TestConnectionFailureModal = ({ sourceTestConnectionError, destinationTestConnectionError, } = useAppStore() + const [isExpanded, setIsExpanded] = useState(false) const navigate = useNavigate() const handleCancel = () => { setShowFailureModal(false) + setIsExpanded(false) } const handleBackToPath = () => { setShowFailureModal(false) + setIsExpanded(false) if (fromSources) { navigate("/sources") } else { @@ -31,15 +37,39 @@ const TestConnectionFailureModal = ({ } } + const handleReadMore = () => setIsExpanded(!isExpanded) + + const handleCopyLogs = async () => { + try { + await navigator.clipboard.writeText( + JSON.stringify( + fromSources + ? sourceTestConnectionError?.logs || [] + : destinationTestConnectionError?.logs || [], + null, + 4, + ), + ) + message.success("Logs copied to clipboard!") + } catch { + message.error("Failed to copy logs") + } + } + return ( -
+
-
+

Failed

-

+

Your test connection has failed

-
- - - {fromSources - ? sourceTestConnectionError - : destinationTestConnectionError} - +
+
+
Error
+ {isExpanded && ( + + )} +
+
+ {!isExpanded ? ( +
+ {fromSources + ? sourceTestConnectionError?.message || "" + : destinationTestConnectionError?.message || ""} +
+ ) : ( +
+ + {(fromSources + ? sourceTestConnectionError?.logs || [] + : destinationTestConnectionError?.logs || [] + ).map((jobLog, index) => ( + + + + + ))} + +
+ + {jobLog.level} + + + {jobLog.message} +
+ )} + + {!isExpanded && ( + + )} +
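Both `SpecFailedModal` and `TestConnectionFailureModal` above copy their error output with the same clipboard-plus-toast pattern; a standalone version of it for reference (the extracted helper itself is not part of the patch):

```ts
// Standalone sketch of the handleCopyLogs pattern used by the two modals above
import { message } from "antd"

export const copyLogsToClipboard = async (logs: unknown) => {
	try {
		// navigator.clipboard is only available in secure contexts (https or localhost)
		await navigator.clipboard.writeText(
			typeof logs === "string" ? logs : JSON.stringify(logs, null, 4),
		)
		message.success("Logs copied to clipboard!")
	} catch {
		message.error("Failed to copy logs")
	}
}
```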
diff --git a/ui/src/modules/common/components/DocumentationPanel.tsx b/ui/src/modules/common/components/DocumentationPanel.tsx index b4c008f1..5021d21e 100644 --- a/ui/src/modules/common/components/DocumentationPanel.tsx +++ b/ui/src/modules/common/components/DocumentationPanel.tsx @@ -2,10 +2,10 @@ import { useState, useRef, useEffect } from "react" import clsx from "clsx" import { Button, Tooltip } from "antd" import { - CornersOut, - CaretRight, - Info, - ArrowSquareOut, + CornersOutIcon, + CaretRightIcon, + InfoIcon, + ArrowSquareOutIcon, } from "@phosphor-icons/react" import { DocumentationPanelProps } from "../../../types" @@ -44,6 +44,7 @@ const DocumentationPanel: React.FC = ({ if (!iframe) return const handleLoad = () => { + // as the theme for ui is light themed we need to show only light theme in docs website as the default theme is dark // Post message to iframe for theming iframe.contentWindow?.postMessage({ theme: "light" }, "https://olake.io") @@ -78,7 +79,7 @@ const DocumentationPanel: React.FC = ({ className="flex items-center" onClick={openInNewTab} icon={ - @@ -91,7 +92,7 @@ const DocumentationPanel: React.FC = ({ className="flex items-center bg-blue-600" onClick={toggleDocPanel} icon={ - @@ -120,7 +121,7 @@ const DocumentationPanel: React.FC = ({ isDocPanelCollapsed ? "rotate-180" : "rotate-0", )} > - +
@@ -144,7 +145,7 @@ const DocumentationPanel: React.FC = ({ diff --git a/ui/src/modules/common/components/Form/BooleanSwitchWidget.tsx b/ui/src/modules/common/components/Form/BooleanSwitchWidget.tsx index 36338023..c50a622a 100644 --- a/ui/src/modules/common/components/Form/BooleanSwitchWidget.tsx +++ b/ui/src/modules/common/components/Form/BooleanSwitchWidget.tsx @@ -1,3 +1,6 @@ +/** + * BooleanSwitchWidget is a component that renders a boolean switch this overrides the default boolean field template of rjsf + */ import { Switch } from "antd" import { WidgetProps } from "@rjsf/utils" diff --git a/ui/src/modules/common/components/Form/CustomFieldTemplate.tsx b/ui/src/modules/common/components/Form/CustomFieldTemplate.tsx index 85e384ba..0348f1fe 100644 --- a/ui/src/modules/common/components/Form/CustomFieldTemplate.tsx +++ b/ui/src/modules/common/components/Form/CustomFieldTemplate.tsx @@ -1,5 +1,5 @@ import { FieldTemplateProps } from "@rjsf/utils" -import { Info, Plus, Trash } from "@phosphor-icons/react" +import { InfoIcon, PlusIcon, TrashIcon } from "@phosphor-icons/react" import { Tooltip, Button } from "antd" import { useState, useEffect } from "react" @@ -42,13 +42,13 @@ function KeyValueRow({
- - @@ -123,7 +129,7 @@ const Sidebar: React.FC<{ collapsed ? "rotate-180" : "rotate-0", )} > - +
diff --git a/ui/src/modules/destinations/components/DestinationEmptyState.tsx b/ui/src/modules/destinations/components/DestinationEmptyState.tsx index 3af708eb..f2fa1d10 100644 --- a/ui/src/modules/destinations/components/DestinationEmptyState.tsx +++ b/ui/src/modules/destinations/components/DestinationEmptyState.tsx @@ -1,5 +1,5 @@ import { Button } from "antd" -import { PlayCircle, Plus } from "@phosphor-icons/react" +import { PlayCircleIcon, PlusIcon } from "@phosphor-icons/react" import { DestinationTutorialYTLink } from "../../../utils/constants" import FirstDestination from "../../../assets/FirstDestination.svg" @@ -29,7 +29,7 @@ const DestinationEmptyState = ({ className="border-1 mb-12 border-[1px] border-[#D9D9D9] bg-white px-6 py-4 text-black" onClick={handleCreateDestination} > - + New Destination
@@ -48,7 +48,7 @@ const DestinationEmptyState = ({
- + OLake/ Tutorial
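The icon renames running through this patch (`Warning` → `WarningIcon`, `Plus` → `PlusIcon`, and so on) appear to follow the `*Icon`-suffixed exports of newer `@phosphor-icons/react` releases; props and usage stay the same. A tiny sketch with a made-up component:

```tsx
// Illustrative only — the component is invented; the imports mirror the renames in this patch
import { PlusIcon, WarningIcon } from "@phosphor-icons/react"

const NewDestinationHint = () => (
	<span className="flex items-center gap-1">
		<PlusIcon className="size-4" />
		New Destination
		<WarningIcon size={16} weight="fill" />
	</span>
)

export default NewDestinationHint
```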
diff --git a/ui/src/modules/destinations/components/DestinationTable.tsx b/ui/src/modules/destinations/components/DestinationTable.tsx index f2b25d8e..b8d0e97e 100644 --- a/ui/src/modules/destinations/components/DestinationTable.tsx +++ b/ui/src/modules/destinations/components/DestinationTable.tsx @@ -1,6 +1,10 @@ import { useState } from "react" import { Table, Input, Button, Dropdown, Pagination } from "antd" -import { DotsThree, PencilSimpleLine, Trash } from "@phosphor-icons/react" +import { + DotsThreeIcon, + PencilSimpleLineIcon, + TrashIcon, +} from "@phosphor-icons/react" import { DestinationTableProps, Entity } from "../../../types" import { getConnectorImage } from "../../../utils/utils" @@ -31,13 +35,13 @@ const DestinationTable: React.FC = ({ items: [ { key: "edit", - icon: , + icon: , label: "Edit", onClick: () => onEdit(String(record.id)), }, { key: "delete", - icon: , + icon: , label: "Delete", danger: true, onClick: () => onDelete(record), @@ -49,7 +53,7 @@ const DestinationTable: React.FC = ({ >
), }, @@ -26,7 +26,7 @@ export const connectorOptions: ConnectorOption[] = [ alt="Apache Iceberg" className="mr-2 size-5" /> - Apache Iceberg + Apache Iceberg
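For reference alongside the connector options above: earlier in this patch `normalizeDestinationType` in `destinationService.ts` remaps the `"amazon s3"` label to the internal `parquet` writer type, while Iceberg keeps mapping to `iceberg`. A cleaned-up copy of that mapping; the values come from the patch, and the generic annotation is restored here on the assumption it is the usual `Record<string, string>` shape:

```ts
// Mirrors the typeMap in ui/src/api/services/destinationService.ts from this patch
const normalizeDestinationType = (type: string): string => {
	const typeMap: Record<string, string> = {
		"amazon s3": "parquet", // S3 destinations go through the parquet writer
		"apache iceberg": "iceberg",
	}
	return typeMap[type.toLowerCase()] || type.toLowerCase()
}

console.log(normalizeDestinationType("Amazon S3")) // "parquet"
```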
), }, diff --git a/ui/src/modules/destinations/pages/CreateDestination.tsx b/ui/src/modules/destinations/pages/CreateDestination.tsx index 43a2069a..ae8714e4 100644 --- a/ui/src/modules/destinations/pages/CreateDestination.tsx +++ b/ui/src/modules/destinations/pages/CreateDestination.tsx @@ -35,6 +35,7 @@ import { DESTINATION_INTERNAL_TYPES, OLAKE_LATEST_VERSION_URL, SETUP_TYPES, + TEST_CONNECTION_STATUS, transformErrors, } from "../../../utils/constants" import EndpointTitle from "../../../utils/EndpointTitle" @@ -53,6 +54,8 @@ import CustomFieldTemplate from "../../common/components/Form/CustomFieldTemplat import validator from "@rjsf/validator-ajv8" import ArrayFieldTemplate from "../../common/components/Form/ArrayFieldTemplate" import { widgets } from "../../common/components/Form/widgets" +import { AxiosError } from "axios" +import SpecFailedModal from "../../common/Modals/SpecFailedModal" type ConnectorType = (typeof CONNECTOR_TYPES)[keyof typeof CONNECTOR_TYPES] @@ -81,6 +84,7 @@ const CreateDestination = forwardRef< onConnectorChange, onFormDataChange, onVersionChange, + onExistingDestinationIdChange, docsMinimized = false, onDocsMinimizedChange, sourceConnector, @@ -117,6 +121,7 @@ const CreateDestination = forwardRef< const [destinationNameError, setDestinationNameError] = useState< string | null >(null) + const [specError, setSpecError] = useState(null) const navigate = useNavigate() const resetVersionState = () => { @@ -138,6 +143,7 @@ const CreateDestination = forwardRef< setShowFailureModal, setShowSourceCancelModal, setDestinationTestConnectionError, + setShowSpecFailedModal, } = useAppStore() const parseDestinationConfig = ( @@ -225,10 +231,12 @@ const CreateDestination = forwardRef< setLoadingVersions(true) try { const response = await destinationService.getDestinationVersions( - connector.toLowerCase(), + connector === CONNECTOR_TYPES.APACHE_ICEBERG + ? 
DESTINATION_INTERNAL_TYPES.ICEBERG + : DESTINATION_INTERNAL_TYPES.S3, ) - if (response.data?.version) { - const receivedVersions = response.data.version + if (response?.version) { + const receivedVersions = response?.version setVersions(receivedVersions) if (receivedVersions.length > 0) { let defaultVersion = receivedVersions[0] @@ -257,7 +265,7 @@ const CreateDestination = forwardRef< fetchVersions() }, [connector, onVersionChange, setupType]) - useEffect(() => { + const handleFetchSpec = () => { if (!version) { setSchema(null) setUiSchema(null) @@ -267,6 +275,7 @@ const CreateDestination = forwardRef< if (setupType === SETUP_TYPES.EXISTING) return setLoading(true) + // cancels old requests when new one is made return withAbortController( signal => destinationService.getDestinationSpec( @@ -284,9 +293,19 @@ const CreateDestination = forwardRef< setSchema({}) setUiSchema({}) console.error("Error fetching destination spec:", error) + if (error instanceof AxiosError) { + setSpecError(error.response?.data.message) + } else { + setSpecError("Failed to fetch spec, Please try again.") + } + setShowSpecFailedModal(true) }, () => setLoading(false), ) + } + + useEffect(() => { + return handleFetchSpec() }, [ connector, version, @@ -306,6 +325,7 @@ const CreateDestination = forwardRef< setShowSourceCancelModal(true) } + //makes sure user enters destination name and version and fills all the required fields in the form const validateDestination = async (): Promise => { try { if (setupType === SETUP_TYPES.NEW) { @@ -368,11 +388,15 @@ const CreateDestination = forwardRef< try { setShowTestingModal(true) + //test the connection and show either success or failure modal based on the result const testResult = await destinationService.testDestinationConnection(newDestinationData) setShowTestingModal(false) - if (testResult.data?.status === "SUCCEEDED") { + if ( + testResult.data?.connection_result.status === + TEST_CONNECTION_STATUS.SUCCEEDED + ) { setShowSuccessModal(true) setTimeout(() => { setShowSuccessModal(false) @@ -381,7 +405,11 @@ const CreateDestination = forwardRef< .catch(error => console.error("Error adding destination:", error)) }, 1000) } else { - setDestinationTestConnectionError(testResult.data?.message || "") + const testConnectionError = { + message: testResult.data?.connection_result.message || "", + logs: testResult.data?.logs || [], + } + setDestinationTestConnectionError(testConnectionError) setShowFailureModal(true) } } catch (error) { @@ -408,6 +436,7 @@ const CreateDestination = forwardRef< setConnector(value as ConnectorType) if (setupType === SETUP_TYPES.EXISTING) { setExistingDestination(null) + onExistingDestinationIdChange?.(null) setDestinationName("") onDestinationNameChange?.("") } @@ -424,6 +453,7 @@ const CreateDestination = forwardRef< const handleSetupTypeChange = (type: SetupType) => { setSetupType(type) setDestinationName("") + onExistingDestinationIdChange?.(null) onDestinationNameChange?.("") if (onDocsMinimizedChange) { @@ -439,6 +469,7 @@ const CreateDestination = forwardRef< setSchema(null) setConnector(CONNECTOR_TYPES.DESTINATION_DEFAULT_CONNECTOR) // Reset to default connector setExistingDestination(null) + onExistingDestinationIdChange?.(null) // Schema will be automatically fetched due to useEffect when connector changes if (onConnectorChange) onConnectorChange(CONNECTOR_TYPES.AMAZON_S3) if (onFormDataChange) onFormDataChange({}) @@ -467,6 +498,7 @@ const CreateDestination = forwardRef< setDestinationName(selectedDestination.name) setFormData(configObj) 
setExistingDestination(value) + onExistingDestinationIdChange?.(selectedDestination.id) } const handleVersionChange = (value: string) => { @@ -493,6 +525,7 @@ const CreateDestination = forwardRef<
= ({ {loadingVersions ? ( @@ -527,6 +528,7 @@ const DestinationEdit: React.FC = ({ ) : versions.length > 0 ? (
) : availableVersions.length > 0 ? (