diff --git a/.claude/commands/generate-entity.md b/.claude/commands/generate-entity.md new file mode 100755 index 0000000..46f8f1a --- /dev/null +++ b/.claude/commands/generate-entity.md @@ -0,0 +1,485 @@ +# Generate Entity Command + +Generate a complete CRUD entity in the hyperfleet application following the **plugin-based architecture**. + +## Plugin System Overview + +**NEW APPROACH**: hyperfleet now uses a plugin system for entity registration. Each entity is self-contained in a plugin package that automatically registers: +- Service locators +- HTTP routes +- Event controllers +- Presenter mappings (Kind and Path) + +**Key Benefits**: +- ✅ **90% reduction in manual updates** (from 8 files to 3 files) +- ✅ **Single source of truth** - all entity wiring in one plugin file +- ✅ **Auto-discovery** - plugin registers itself via `init()` function +- ✅ **Type-safe** - compile-time checks for service access +- ✅ **Easier maintenance** - self-contained entity logic + +**File Count Summary**: +- **1 new plugin file** (`plugins/{entity}s/plugin.go`) - replaces 5 manual update steps +- **10 standard files** (API model, DAO, service, handlers, presenters, migration, OpenAPI, tests, factories) +- **3 manual updates** (main.go import, migration list, OpenAPI refs) + +## Instructions + +You will guide the user through creating a new entity with all required artifacts. Use the Dinosaur plugin (`plugins/dinosaurs/plugin.go`) as the reference pattern. + +### Step 1: Gather Requirements + +Ask the user for: +1. **Entity Name** (singular, PascalCase): e.g., "Widget", "Product", "Customer" +2. **Entity Fields**: Additional fields beyond the base Meta fields (ID, CreatedAt, UpdatedAt, DeletedAt) + - Field name (camelCase in code, snake_case in DB) + - Field type (string, int, bool, time.Time, etc.) + - Database constraints (index, unique, etc.) +3. **API Path** (plural, lowercase): e.g., "widgets", "products", "customers" + +### Step 2: Create Required Files + +Use the TodoWrite tool to track your progress through these steps: + +#### 2.1 Plugin Package (`plugins/{entity}s/plugin.go`) + +**This is the NEW core file that replaces manual service locator, route registration, and controller setup.** + +Create a plugin file that registers: +- Service locator function +- Route registration +- Controller registration +- Presenter mappings (Kind and Path) + +Example pattern from `plugins/dinosaurs/plugin.go`: +```go +package widgets + +import ( + "net/http" + + "github.com/gorilla/mux" + "github.com/openshift-hyperfleet/hyperfleet/cmd/hyperfleet/environments" + "github.com/openshift-hyperfleet/hyperfleet/cmd/hyperfleet/environments/registry" + "github.com/openshift-hyperfleet/hyperfleet/cmd/hyperfleet/server" + "github.com/openshift-hyperfleet/hyperfleet/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet/pkg/api/presenters" + "github.com/openshift-hyperfleet/hyperfleet/pkg/auth" + "github.com/openshift-hyperfleet/hyperfleet/pkg/controllers" + "github.com/openshift-hyperfleet/hyperfleet/pkg/dao" + "github.com/openshift-hyperfleet/hyperfleet/pkg/db" + "github.com/openshift-hyperfleet/hyperfleet/pkg/handlers" + "github.com/openshift-hyperfleet/hyperfleet/pkg/services" +) + +// Service Locator +type WidgetServiceLocator func() services.WidgetService + +func NewWidgetServiceLocator(env *environments.Env) WidgetServiceLocator { + return func() services.WidgetService { + return services.NewWidgetService( + db.NewAdvisoryLockFactory(env.Database.SessionFactory), + dao.NewWidgetDao(&env.Database.SessionFactory), + env.Services.Events(), + ) + } +} + +// WidgetService helper function to get the widget service from the registry +func WidgetService(s *environments.Services) services.WidgetService { + if s == nil { + return nil + } + if obj := s.GetService("Widgets"); obj != nil { + locator := obj.(WidgetServiceLocator) + return locator() + } + return nil +} + +func init() { + // Service registration + registry.RegisterService("Widgets", func(env interface{}) interface{} { + return NewWidgetServiceLocator(env.(*environments.Env)) + }) + + // Routes registration + server.RegisterRoutes("widgets", func(apiV1Router *mux.Router, services server.ServicesInterface, authMiddleware auth.JWTMiddleware, authzMiddleware auth.AuthorizationMiddleware) { + envServices := services.(*environments.Services) + widgetHandler := handlers.NewWidgetHandler(WidgetService(envServices), envServices.Generic()) + + widgetsRouter := apiV1Router.PathPrefix("/widgets").Subrouter() + widgetsRouter.HandleFunc("", widgetHandler.List).Methods(http.MethodGet) + widgetsRouter.HandleFunc("/{id}", widgetHandler.Get).Methods(http.MethodGet) + widgetsRouter.HandleFunc("", widgetHandler.Create).Methods(http.MethodPost) + widgetsRouter.HandleFunc("/{id}", widgetHandler.Patch).Methods(http.MethodPatch) + widgetsRouter.HandleFunc("/{id}", widgetHandler.Delete).Methods(http.MethodDelete) + widgetsRouter.Use(authMiddleware.AuthenticateAccountJWT) + widgetsRouter.Use(authzMiddleware.AuthorizeApi) + }) + + // Controller registration + server.RegisterController("Widgets", func(manager *controllers.KindControllerManager, services *environments.Services) { + widgetServices := WidgetService(services) + + manager.Add(&controllers.ControllerConfig{ + Source: "Widgets", + Handlers: map[api.EventType][]controllers.ControllerHandlerFunc{ + api.CreateEventType: {widgetServices.OnUpsert}, + api.UpdateEventType: {widgetServices.OnUpsert}, + api.DeleteEventType: {widgetServices.OnDelete}, + }, + }) + }) + + // Presenter registration + presenters.RegisterPath(api.Widget{}, "widgets") + presenters.RegisterPath(&api.Widget{}, "widgets") + presenters.RegisterKind(api.Widget{}, "Widget") + presenters.RegisterKind(&api.Widget{}, "Widget") +} +``` + +**Key Features:** +- **Self-contained**: All entity registration in one file +- **Auto-discovery**: Uses `init()` function for automatic registration +- **No manual edits needed**: Eliminates updates to routes.go, controllers.go, types.go, framework.go +- **Service locator pattern**: Provides type-safe service access +- **Helper function**: `WidgetService()` retrieves service from registry + +#### 2.2 API Model (`pkg/api/{entity}_types.go`) + +Create the entity struct with: +- Embedded `Meta` struct (provides ID, CreatedAt, UpdatedAt, DeletedAt) +- Custom fields from requirements +- List and Index types +- BeforeCreate hook for ID generation +- PatchRequest struct for updates + +Example pattern: +```go +package api + +import "gorm.io/gorm" + +type Widget struct { + Meta + Name string + Description string + Status string +} + +type WidgetList []*Widget +type WidgetIndex map[string]*Widget + +func (l WidgetList) Index() WidgetIndex { + index := WidgetIndex{} + for _, o := range l { + index[o.ID] = o + } + return index +} + +func (w *Widget) BeforeCreate(tx *gorm.DB) error { + w.ID = NewID() + return nil +} + +type WidgetPatchRequest struct { + Name *string `json:"name,omitempty"` + Description *string `json:"description,omitempty"` + Status *string `json:"status,omitempty"` +} +``` + +#### 2.2 DAO Layer (`pkg/dao/{entity}.go`) + +Create interface and implementation with: +- Get(ctx, id) - retrieve by ID +- Create(ctx, entity) - create new record +- Replace(ctx, entity) - update existing record +- Delete(ctx, id) - delete record +- FindByIDs(ctx, ids) - batch retrieval +- All(ctx) - retrieve all records +- Custom finders as needed + +Pattern: See `pkg/dao/dinosaur.go` + +#### 2.3 DAO Mock (`pkg/dao/mocks/{entity}.go`) + +Create mock implementation for testing. + +Pattern: See `pkg/dao/mocks/dinosaur.go` + +#### 2.4 Service Layer (`pkg/services/{entity}s.go`) + +Create interface and implementation with: +- CRUD operations (Get, Create, Replace, Delete, All, FindByIDs) +- Event-driven handlers (OnUpsert, OnDelete) +- Business logic and validation +- Event creation for CREATE, UPDATE, DELETE operations +- Advisory lock for concurrent updates + +Pattern: See `pkg/services/dinosaurs.go` + +Key features: +- Use `LockFactory` for advisory locks on updates +- Create events after successful operations +- Implement idempotent OnUpsert and OnDelete handlers + +**NOTE**: Service instantiation is now handled in the plugin file, NOT in a separate locator file + +#### 2.5 Presenters (`pkg/api/presenters/{entity}.go`) + +Create conversion functions: +- `Convert{Entity}(openapi.{Entity}) *api.{Entity}` - OpenAPI to internal model +- `Present{Entity}(*api.{Entity}) openapi.{Entity}` - Internal to OpenAPI model + +Pattern: See `pkg/api/presenters/dinosaur.go` + +#### 2.6 Handlers (`pkg/handlers/{entity}.go`) + +Create HTTP handlers: +- Create - POST endpoint +- Get - GET by ID endpoint +- List - GET collection endpoint with pagination +- Patch - PATCH update endpoint +- Delete - DELETE endpoint + +Pattern: See `pkg/handlers/dinosaur.go` + +Include validation in handlers using the `validate` pattern. + +#### 2.7 Database Migration (`pkg/db/migrations/{timestamp}_add_{entity}s.go`) + +Create migration with: +- Timestamp ID (YYYYMMDDHHMM format) +- Inline model definition (never import from pkg/api) +- Migrate function using AutoMigrate +- Rollback function using DropTable + +Pattern: See `pkg/db/migrations/201911212019_add_dinosaurs.go` + +**IMPORTANT**: Use inline struct definition in migration, not imported types. + +#### 2.8 OpenAPI Specification (`openapi/openapi.{entity}s.yaml`) + +Create OpenAPI spec with: +- Path definitions for collection and item endpoints +- Schema definitions for entity, list, and patch request +- Parameter definitions (id, page, size, search, orderBy, fields) +- Security requirements (Bearer token) +- Response codes and schemas + +Pattern: See `openapi/openapi.dinosaurs.yaml` + +#### 2.9 Plugin Import (`cmd/hyperfleet/main.go`) + +**IMPORTANT**: Add a blank import to ensure the plugin's `init()` function runs: + +```go +import ( + _ "github.com/openshift-hyperfleet/hyperfleet/plugins/dinosaurs" + _ "github.com/openshift-hyperfleet/hyperfleet/plugins/widgets" // <- Add this +) +``` + +This triggers the plugin registration when the application starts. + +#### 2.10 Test Factory (`test/factories/{entity}s.go`) + +Create factory methods: +- `New{Entity}(params) (*api.{Entity}, error)` - create single entity +- `New{Entity}List(prefix, count) ([]*api.{Entity}, error)` - create list + +Pattern: See `test/factories/dinosaurs.go` + +#### 2.11 Integration Tests (`test/integration/{entity}s_test.go`) + +Create integration tests: +- Test{Entity}Get - test GET by ID (200, 404, 401) +- Test{Entity}Post - test CREATE (201, 400) +- Test{Entity}Patch - test UPDATE (200, 404, 400) +- Test{Entity}Paging - test pagination +- Test{Entity}ListSearch - test search functionality + +Pattern: See `test/integration/dinosaurs_test.go` + +### Step 3: Update Existing Files + +**With the plugin system, most manual file updates are ELIMINATED!** Only these files need updates: + +#### 3.1 Update `cmd/hyperfleet/main.go` + +Add plugin import (triggers auto-registration): +```go +import ( + _ "github.com/openshift-hyperfleet/hyperfleet/plugins/widgets" // <- Add this +) +``` + +#### 3.2 Update `pkg/db/migrations/migration_structs.go` + +Add migration to list: +```go +var MigrationList = []*gormigrate.Migration{ + addDinosaurs(), + addEvents(), + addWidgets(), // <- Add this +} +``` + +#### 3.3 Update `openapi/openapi.yaml` + +Add reference to entity spec: +```yaml +paths: + $ref: + - 'openapi.dinosaurs.yaml' + - 'openapi.widgets.yaml' # <- Add this +``` + +### Step 4: Generate OpenAPI Client Code + +After creating the OpenAPI spec, run: +```bash +make generate +``` + +**IMPORTANT: Wait for completion and verify results** + +This command takes 2-3 minutes to complete. You MUST: + +1. **Run the command and wait for completion:** + ```bash + make generate 2>&1 | tee generate.log + ``` + +2. **Verify the generated files exist:** + ```bash + ls -la pkg/api/openapi/model_{entity}*.go + ls -la pkg/api/openapi/docs/{Entity}*.md + ``` + +3. **Check for compilation errors:** + ```bash + go build ./cmd/hyperfleet + ``` + +4. **If generation fails or times out:** + - Check the Docker/Podman daemon is running + - Review the full output in generate.log + - Verify openapi.yaml syntax is valid + +**Expected generated files:** +- `pkg/api/openapi/model_{entity}.go` +- `pkg/api/openapi/model_{entity}_all_of.go` +- `pkg/api/openapi/model_{entity}_list.go` +- `pkg/api/openapi/model_{entity}_list_all_of.go` +- `pkg/api/openapi/model_{entity}_patch_request.go` +- `pkg/api/openapi/docs/{Entity}*.md` +- Updated `pkg/api/openapi/api_default.go` with new endpoints + +**Do NOT proceed to Step 5 until:** +- [ ] All expected files exist +- [ ] `go build ./cmd/hyperfleet` completes without errors +- [ ] Integration test files compile successfully + +### Step 5: Verify Compilation + +Before running tests, ensure the code compiles: + +```bash +# Build the binary to catch any compilation errors +go build -o /tmp/hyperfleet ./cmd/hyperfleet + +# If this fails, review the errors and fix: +# - Missing imports +# - Type mismatches in presenters +# - Undefined constants or types +``` + +### Step 6: Test the Implementation + +```bash +# Run database migrations +make db/teardown +make db/setup +./hyperfleet-api migrate + +# Run integration tests +make test-integration + +# Run specific entity tests +go test -v ./test/integration -run TestWidget + +``` +#### Step 6.1: Create a test script + +Create a shell script to test the new endpoints for Widget using curl commands + +### Key Patterns to Follow + +1. **Naming Conventions**: + - API paths: snake_case plural (e.g., `/widgets`, `/product_categories`) + - Go types: PascalCase (e.g., `Widget`, `ProductCategory`) + - Go variables: camelCase (e.g., `widget`, `productCategory`) + - Database tables: snake_case plural (e.g., `widgets`, `product_categories`) + +2. **Event-Driven Architecture**: + - Create events for all CREATE, UPDATE, DELETE operations + - Implement idempotent OnUpsert and OnDelete handlers + - Register handlers in controllers.go + +3. **Database Patterns**: + - Use advisory locks for concurrent updates + - All entities embed the `Meta` struct + - Migrations use inline struct definitions + - Use GORM for ORM operations + +4. **API Patterns**: + - Use OpenAPI specs for all endpoints + - Follow RESTful conventions + - Use presenters to convert between OpenAPI and internal models + - Include proper validation in handlers + +5. **Testing Patterns**: + - Create factory methods for test data + - Test all CRUD operations + - Test error cases (404, 400, 401) + - Test pagination and search + +6. **Security**: + - All endpoints require JWT authentication + - Use authorization middleware + - Validate all inputs in handlers + +### Checklist + +After completing all steps, verify: +- [ ] **Plugin file created** (`plugins/{entity}s/plugin.go`) +- [ ] All 10 other new files created (API model, DAO, service, handlers, presenters, migration, OpenAPI, tests, factories) +- [ ] **Only 3 existing files updated** (main.go, migration_structs.go, openapi.yaml) +- [ ] Plugin import added to `cmd/hyperfleet/main.go` +- [ ] OpenAPI client code regenerated (`make generate`) +- [ ] Generated files verified to exist +- [ ] Code compiles without errors (`go build ./cmd/hyperfleet`) +- [ ] Database migrations run successfully +- [ ] Integration tests pass +- [ ] API endpoints respond correctly +- [ ] Events are created and processed +- [ ] **Plugin auto-discovery working** (routes, controllers, presenters registered automatically) + +## Example Usage + +To generate a new "Widget" entity with fields "name" and "description": + +1. User provides entity details +2. **You create the plugin file first** (`plugins/widgets/plugin.go`) - this is the NEW core integration point +3. You create all other required files following the patterns +4. **You update only 3 existing files** (main.go import, migration_structs.go, openapi.yaml) +5. You run `make generate` to create OpenAPI client code +6. You verify with tests +7. Create a script file to test the endpoints + + +Remember to use TodoWrite tool to track progress through all steps! diff --git a/.gitignore b/.gitignore new file mode 100755 index 0000000..44adaca --- /dev/null +++ b/.gitignore @@ -0,0 +1,51 @@ +# See https://help.github.com/articles/ignoring-files for more about ignoring files. +# +# If you find yourself ignoring temporary files generated by your text editor +# or operating system, you probably want to add a global ignore instead: +# git config --global core.excludesfile '~/.gitignore_global' + +# Ignore bundler config. +/.bundle + +# Ignore all logfiles and tempfiles. +/log/* +/tmp/* +!/log/.keep +!/tmp/.keep + +# Ignore uploaded files in development +/storage/* + +# Ignore temporary *.swp files +*.swp + +# Ignore test databases +/db.sqlite.test.* + +.byebug_history + +# Ignore master key for decrypting credentials and more. +/config/master.key + +/coverage + +# Ignore built binaries +/hyperfleet-api + +# Ignore generated templates +/templates/*-template.json + +# Ignore editor config +.vscode + +# Ignore IntelliJ config +.idea/ + +# Ignore secrets directory +secrets + +# Ignore vendor directory +/vendor/ + +# Ignore generated OpenAPI code +/pkg/api/openapi/ diff --git a/.golangciversion b/.golangciversion new file mode 100755 index 0000000..b978278 --- /dev/null +++ b/.golangciversion @@ -0,0 +1 @@ +1.43.0 diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100755 index 0000000..be7f62d --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,795 @@ +# CLAUDE.md + +This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository. + +## Project Overview + +HyperFleet API is a stateless REST API that serves as the pure data layer for the HyperFleet cluster lifecycle management system. It provides CRUD operations for clusters and node pools, accepts status updates from adapters, and stores all resource data in PostgreSQL. This API contains no business logic and creates no events - it is purely a data persistence and retrieval service. + +## Architecture Context + +HyperFleet API is one component in the HyperFleet architecture: + +- **HyperFleet API** (this service): Pure CRUD data layer with PostgreSQL +- **Sentinel Service**: Centralized business logic and event publishing +- **Adapters**: Execute operations (DNS, Hypershift, etc.) and report status back to API + +The API's role is strictly limited to: +1. Accept resource creation/update/delete requests +2. Persist resource data to PostgreSQL +3. Accept status updates from adapters via POST `/{resourceType}/{id}/statuses` +4. Serve resource data to Sentinel via GET `/{resourceType}` +5. Calculate aggregate status from adapter conditions + +## Technology Stack + +### Core Technologies +- **Language**: Go 1.24.9 with FIPS-compliant crypto (CGO_ENABLED=1, GOEXPERIMENT=boringcrypto) +- **Database**: PostgreSQL 14.2 with GORM ORM +- **API Specification**: TypeSpec → OpenAPI 3.0.3 +- **Code Generation**: openapi-generator-cli v7.16.0 +- **Container Runtime**: Podman +- **Testing**: gotestsum, Gomega, Resty, Testcontainers + +### Why These Choices + +**Go 1.24**: Required for FIPS compliance in enterprise/government deployments + +**TypeSpec**: Provides type-safe API specification with better maintainability than writing OpenAPI YAML manually + +**GORM**: Provides database abstraction with migration support and PostgreSQL-specific features + +**Testcontainers**: Enables integration tests with real PostgreSQL instances without external dependencies + +## Development Commands + +### Building and Running +```bash +make binary # Build the hyperfleet-api binary +make install # Build and install binary to GOPATH/bin +make run # Run migrations and start server with authentication +make run-no-auth # Run server without authentication (development mode) +``` + +### Testing +```bash +make test # Run unit tests +make test-integration # Run integration tests +make ci-test-unit # Run unit tests with JSON output for CI +make ci-test-integration # Run integration tests with JSON output for CI +``` + +### Code Quality +```bash +make verify # Run source code verification (vet, formatting) +make lint # Run golangci-lint +``` + +### Database Operations +```bash +make db/setup # Start PostgreSQL container locally +make db/login # Connect to local PostgreSQL database +make db/teardown # Stop and remove PostgreSQL container +./hyperfleet-api migrate # Run database migrations +``` + +### Code Generation +```bash +make generate # Regenerate Go models from openapi/openapi.yaml +make generate-vendor # Generate using vendor dependencies (offline mode) +``` + +## Project Structure + +``` +hyperfleet-api/ +├── cmd/hyperfleet/ # Application entry point +│ ├── migrate/ # Database migration command +│ ├── serve/ # API server command +│ └── environments/ # Environment configuration +│ ├── development.go # Local development settings +│ ├── integration_testing.go # Integration test settings +│ ├── unit_testing.go # Unit test settings +│ └── production.go # Production settings +├── pkg/ +│ ├── api/ # API models and OpenAPI spec +│ │ ├── openapi/ # Generated Go models +│ │ │ ├── api/openapi.yaml # Embedded OpenAPI spec (44KB, fully resolved) +│ │ │ └── model_*.go # Generated model structs +│ │ └── openapi_embed.go # Go embed directive for OpenAPI spec +│ ├── dao/ # Data Access Objects +│ │ ├── cluster.go # Cluster CRUD operations +│ │ ├── nodepool.go # NodePool CRUD operations +│ │ ├── adapter_status.go # Status CRUD operations +│ │ └── label.go # Label operations +│ ├── db/ # Database layer +│ │ ├── db.go # GORM connection and session factory +│ │ ├── transaction_middleware.go # HTTP middleware for DB transactions +│ │ └── migrations/ # GORM migration files +│ ├── handlers/ # HTTP request handlers +│ │ ├── cluster_handler.go # Cluster endpoint handlers +│ │ ├── nodepool_handler.go # NodePool endpoint handlers +│ │ └── compatibility_handler.go # API compatibility endpoint +│ ├── services/ # Service layer (status aggregation, search) +│ │ ├── cluster_service.go # Cluster business operations +│ │ └── nodepool_service.go # NodePool business operations +│ ├── config/ # Configuration management +│ ├── logger/ # Structured logging +│ └── errors/ # Error handling utilities +├── openapi/ +│ └── openapi.yaml # TypeSpec-generated OpenAPI spec (32KB, source) +├── test/ +│ ├── integration/ # Integration tests for all endpoints +│ └── factories/ # Test data factories +└── Makefile # Build automation +``` + +## Core Components + +### 1. API Specification Workflow + +The API is specified using TypeSpec, which compiles to OpenAPI, which then generates Go models: + +``` +TypeSpec (.tsp files in hyperfleet-api-spec repo) + ↓ tsp compile +openapi/openapi.yaml (32KB, uses $ref for DRY) + ↓ make generate (openapi-generator-cli in Podman) +pkg/api/openapi/model_*.go (Go structs) +pkg/api/openapi/api/openapi.yaml (44KB, fully resolved, embedded in binary) +``` + +**Key Points**: +- TypeSpec definitions are maintained in a separate `hyperfleet-api-spec` repository +- `openapi/openapi.yaml` is the source of truth for this repository (generated from TypeSpec) +- `make generate` uses Podman to run openapi-generator-cli, ensuring consistent versions +- Generated code includes JSON tags, validation, and type definitions +- The fully resolved spec is embedded at compile time via `//go:embed` + +### 2. Database Layer + +**GORM Session Management**: +```go +// pkg/db/db.go +type SessionFactory interface { + NewSession(ctx context.Context) *gorm.DB + Close() error +} +``` + +**Transaction Middleware**: +All HTTP requests automatically get a database session via middleware at pkg/db/transaction_middleware.go:13: +```go +func TransactionMiddleware(next http.Handler, connection SessionFactory) http.Handler { + // Creates session for each request + // Stores in context + // Auto-commits on success, rolls back on error +} +``` + +**Schema**: +```sql +-- Core resource tables +clusters (id, name, spec JSONB, generation, labels, created_at, updated_at) +node_pools (id, name, owner_id FK, spec JSONB, labels, created_at, updated_at) + +-- Status tracking +adapter_statuses (owner_type, owner_id, adapter, observed_generation, conditions JSONB) + +-- Labels for filtering +labels (owner_type, owner_id, key, value) +``` + +**Migration System**: +GORM AutoMigrate is used at startup via `./hyperfleet-api migrate` command. + +### 3. Data Access Objects (DAO) + +DAOs provide CRUD operations with GORM: + +**Example - Cluster DAO**: +```go +type ClusterDAO interface { + Create(ctx context.Context, cluster *api.Cluster) (*api.Cluster, error) + Get(ctx context.Context, id string) (*api.Cluster, error) + List(ctx context.Context, listArgs *ListArgs) (*api.ClusterList, error) + Update(ctx context.Context, cluster *api.Cluster) (*api.Cluster, error) + Delete(ctx context.Context, id string) error +} +``` + +**Patterns**: +- All DAO methods take `context.Context` for transaction propagation +- Session is retrieved from context via `db.NewContext()` +- List operations support pagination via `ListArgs` +- Search is implemented via GORM WHERE clauses + +### 4. HTTP Handlers + +Handlers follow a consistent pattern at pkg/handlers/: + +```go +func (h *clusterHandler) Create(w http.ResponseWriter, r *http.Request) { + // 1. Parse request body + var cluster openapi.Cluster + json.NewDecoder(r.Body).Decode(&cluster) + + // 2. Call service/DAO + result, err := h.service.Create(r.Context(), &cluster) + + // 3. Handle errors + if err != nil { + errors.SendError(w, r, err) + return + } + + // 4. Send response + w.WriteHeader(http.StatusCreated) + json.NewEncoder(w).Encode(result) +} +``` + +### 5. Status Aggregation Pattern + +The API calculates aggregate status from adapter-specific conditions: + +**Adapter Status Structure**: +```json +{ + "adapter": "dns-adapter", + "observed_generation": 1, + "conditions": [ + { + "adapter": "dns-adapter", + "type": "Ready", + "status": "True", + "observed_generation": 1, + "reason": "ClusterProvisioned", + "message": "Cluster successfully provisioned", + "created_at": "2025-11-17T15:04:05Z", + "updated_at": "2025-11-17T15:04:05Z" + } + ] +} +``` + +**Aggregation Logic**: +- Phase is `Ready` if all adapters report `Ready=True` +- Phase is `Failed` if any adapter reports `Ready=False` +- Phase is `NotReady` otherwise (progressing, unknown, or missing conditions) +- `observed_generation` tracks which spec version the adapter has seen + +**Why This Pattern**: +Kubernetes-style conditions allow multiple independent adapters to report status without coordination. The API simply aggregates these into a summary phase for client convenience. + +## API Resources + +### Cluster + +**Endpoints**: +- `GET /api/hyperfleet/v1/clusters` - List with pagination and search +- `POST /api/hyperfleet/v1/clusters` - Create new cluster +- `GET /api/hyperfleet/v1/clusters/{cluster_id}` - Get single cluster +- `GET /api/hyperfleet/v1/clusters/{cluster_id}/statuses` - Get adapter statuses +- `POST /api/hyperfleet/v1/clusters/{cluster_id}/statuses` - Report status from adapter + +**Key Fields**: +- `spec` (JSON): Cloud provider configuration (region, version, nodes, etc.) +- `generation` (int): Increments on each spec change, enables optimistic concurrency +- `labels` (map): Key-value pairs for categorization and filtering +- `status.observed_generation`: Latest generation that adapters have processed + +### NodePool + +**Endpoints**: +- `GET /api/hyperfleet/v1/nodepools` - List all node pools +- `GET /api/hyperfleet/v1/clusters/{cluster_id}/nodepools` - List cluster's node pools +- `POST /api/hyperfleet/v1/clusters/{cluster_id}/nodepools` - Create node pool +- `GET /api/hyperfleet/v1/clusters/{cluster_id}/nodepools/{nodepool_id}` - Get single node pool +- `GET /api/hyperfleet/v1/clusters/{cluster_id}/nodepools/{nodepool_id}/statuses` - Get statuses +- `POST /api/hyperfleet/v1/clusters/{cluster_id}/nodepools/{nodepool_id}/statuses` - Report status + +**Key Fields**: +- `owner_references.id`: Parent cluster ID (enforced via foreign key) +- `spec` (JSON): Instance type, replica count, disk size, etc. +- Status follows same pattern as Cluster + +## hyperfleet CLI Commands + +The `hyperfleet` binary provides two main subcommands: + +### `hyperfleet serve` - Start the API Server + +Serves the hyperfleet REST API with full authentication, database connectivity, and monitoring capabilities. + +**Basic Usage:** +```bash +./hyperfleet-api serve # Start server on localhost:8000 +./hyperfleet-api serve --api-server-bindaddress :8080 # Custom bind address +./hyperfleet-api serve --enable-authz=false --enable-jwt=false # No authentication +``` + +**Key Configuration Options:** + +- **Server Binding:** + - `--api-server-bindaddress` - API server bind address (default: "localhost:8000") + - `--api-server-hostname` - Server's public hostname + - `--enable-https` - Enable HTTPS rather than HTTP + - `--https-cert-file` / `--https-key-file` - TLS certificate files + +- **Database Configuration:** + - `--db-host-file` - Database host file (default: "secrets/db.host") + - `--db-name-file` - Database name file (default: "secrets/db.name") + - `--db-user-file` - Database username file (default: "secrets/db.user") + - `--db-password-file` - Database password file (default: "secrets/db.password") + - `--db-port-file` - Database port file (default: "secrets/db.port") + - `--db-sslmode` - Database SSL mode: disable | require | verify-ca | verify-full (default: "disable") + - `--db-max-open-connections` - Maximum open DB connections (default: 50) + - `--enable-db-debug` - Enable database debug mode + +- **Authentication & Authorization:** + - `--enable-jwt` - Enable JWT authentication validation (default: true) + - `--enable-authz` - Enable authorization on endpoints (default: true) + - `--jwk-cert-url` - JWK Certificate URL for JWT validation (default: Red Hat SSO) + - `--jwk-cert-file` - Local JWK Certificate file + - `--acl-file` - Access control list file + +- **OCM Integration:** + - `--enable-ocm-mock` - Enable mock OCM clients (default: true) + - `--ocm-base-url` - OCM API base URL (default: integration environment) + - `--ocm-token-url` - OCM token endpoint URL (default: Red Hat SSO) + - `--ocm-client-id-file` - OCM API client ID file (default: "secrets/ocm-service.clientId") + - `--ocm-client-secret-file` - OCM API client secret file (default: "secrets/ocm-service.clientSecret") + - `--self-token-file` - OCM API privileged offline SSO token file + - `--ocm-debug` - Enable OCM API debug logging + +- **Monitoring & Health Checks:** + - `--health-check-server-bindaddress` - Health check server address (default: "localhost:8083") + - `--enable-health-check-https` - Enable HTTPS for health check server + - `--metrics-server-bindaddress` - Metrics server address (default: "localhost:8080") + - `--enable-metrics-https` - Enable HTTPS for metrics server + +- **Performance Tuning:** + - `--http-read-timeout` - HTTP server read timeout (default: 5s) + - `--http-write-timeout` - HTTP server write timeout (default: 30s) + - `--label-metrics-inclusion-duration` - Telemetry collection timeframe (default: 168h) + +### `hyperfleet migrate` - Run Database Migrations + +Executes database schema migrations to set up or update the database structure. + +**Basic Usage:** +```bash +./hyperfleet-api migrate # Run all pending migrations +./hyperfleet-api migrate --enable-db-debug # Run with database debug logging +``` + +**Configuration Options:** +- **Database Connection:** (same as serve command) + - `--db-host-file`, `--db-name-file`, `--db-user-file`, `--db-password-file` + - `--db-port-file`, `--db-sslmode`, `--db-rootcert` + - `--db-max-open-connections` - Maximum DB connections (default: 50) + - `--enable-db-debug` - Enable database debug mode + +**Migration Process:** +- Applies all pending migrations in order +- Creates migration tracking table if needed +- Idempotent - safe to run multiple times +- Logs each migration applied + +### Common Global Flags + +All subcommands support these logging flags: +- `--logtostderr` - Log to stderr instead of files (default: true) +- `--alsologtostderr` - Log to both stderr and files +- `--log_dir` - Directory for log files +- `--stderrthreshold` - Minimum log level for stderr (default: 2) +- `-v, --v` - Log level for verbose logs +- `--vmodule` - Module-specific log levels +- `--log_backtrace_at` - Emit stack trace at specific file:line + +## Development Workflow + +### Environment Setup + +```bash +# Prerequisites: Go 1.24, Podman, PostgreSQL client tools + +# Generate OpenAPI code (required before go mod download) +make generate + +# Install dependencies +go install gotest.tools/gotestsum@latest +go mod download + +# Initialize secrets directory with default values +make secrets + +# Start PostgreSQL +make db/setup + +# Build binary +make binary + +# Run migrations +./hyperfleet-api migrate + +# Start server (no authentication) +make run-no-auth +``` + +### Code Generation + +When the TypeSpec specification changes: + +```bash +# Regenerate Go models from openapi/openapi.yaml +make generate + +# This will: +# 1. Remove pkg/api/openapi/* +# 2. Build Docker image with openapi-generator-cli +# 3. Generate model_*.go files +# 4. Copy fully resolved openapi.yaml to pkg/api/openapi/api/ +``` + +### Testing + +**Unit Tests**: +```bash +OCM_ENV=unit_testing make test +``` + +**Integration Tests**: +```bash +OCM_ENV=integration_testing make test-integration +``` + +Integration tests use Testcontainers to spin up real PostgreSQL instances. Each test gets a fresh database to ensure isolation. + +### Database Operations + +```bash +# Connect to database +make db/login + +# Inspect schema +\dt + +# Stop database +make db/teardown +``` + +## Configuration Management + +### Environment-Based Configuration + +The application uses `OCM_ENV` environment variable to select configuration: + +- `development` - Local development with localhost database +- `unit_testing` - In-memory or minimal database +- `integration_testing` - Testcontainers-based PostgreSQL +- `production` - Production credentials from secrets + +**Environment Implementation**: See cmd/hyperfleet/environments/framework.go:66 + +Each environment can override: +- Database connection settings +- OCM client configuration (mock vs real) +- Service implementations +- Handler configurations + +### Configuration Files + +Configuration is loaded from `secrets/` directory: + +``` +secrets/ +├── db.host # Database hostname +├── db.name # Database name +├── db.password # Database password +├── db.port # Database port +├── db.user # Database username +├── ocm-service.clientId +├── ocm-service.clientSecret +└── ocm-service.token +``` + +Initialize with defaults: `make secrets` + +## Logging + +Structured logging is provided via pkg/logger/logger.go:36: + +```go +log := logger.NewOCMLogger(ctx) +log.Infof("Processing cluster %s", clusterID) +log.Extra("cluster_id", clusterID).Extra("operation", "create").Info("Cluster created") +``` + +**Log Context**: +- `[opid=xxx]` - Operation ID for request tracing +- `[accountID=xxx]` - User account ID from JWT +- `[tx_id=xxx]` - Database transaction ID + +## Error Handling + +Errors use a structured error type defined in pkg/errors/: + +```go +type ServiceError struct { + HttpCode int + Code string + Reason string +} +``` + +**Pattern**: +```go +if err != nil { + serviceErr := errors.GeneralError("Failed to create cluster") + errors.SendError(w, r, serviceErr) + return +} +``` + +Errors are automatically converted to OpenAPI error responses with operation IDs for debugging. + +## Authentication & Authorization + +The API supports two modes: + +**No Auth** (development): +```bash +make run-no-auth +``` + +**OCM JWT Auth** (production): +- Validates JWT tokens from Red Hat SSO +- Extracts account ID and username from claims +- Enforces organization-based access control + +**Implementation**: JWT middleware validates tokens and populates context with user information. + +## Key Design Patterns + +### 1. Context-Based Session Management + +Database sessions are stored in request context via middleware. This ensures: +- Automatic transaction lifecycle +- Thread-safe session access +- Proper cleanup on request completion + +### 2. Polymorphic Status Tables + +`adapter_statuses` uses `owner_type` + `owner_id` to support multiple resource types: +```sql +SELECT * FROM adapter_statuses +WHERE owner_type = 'Cluster' AND owner_id = '123' +``` + +This avoids creating separate status tables for each resource type. + +### 3. Generation-Based Optimistic Concurrency + +The `generation` field increments on each spec update: +```go +cluster.Generation++ // On each update +``` + +Adapters report `observed_generation` in status to indicate which version they've processed. This enables: +- Detecting when spec has changed since adapter last processed +- Preventing race conditions in distributed systems +- Tracking reconciliation progress + +### 4. Embedded OpenAPI Specification + +The OpenAPI spec is embedded at compile time using Go 1.16+ `//go:embed`: + +```go +//go:embed openapi/api/openapi.yaml +var openapiFS embed.FS +``` + +This means: +- No file I/O at runtime +- Spec is always available even in containers +- Swagger UI works without external files +- Binary is self-contained + +## Testing Strategy + +### Integration Test Coverage + +All 12 API endpoints have integration test coverage in test/integration/: + +- Cluster CRUD operations +- NodePool CRUD operations +- Status reporting and aggregation +- Pagination behavior +- Search functionality +- Error cases (not found, validation errors) + +### Test Data Factories + +Test factories in test/factories/ provide consistent test data: + +```go +factories.NewClusterBuilder(). + WithName("test-cluster"). + WithSpec(clusterSpec). + Build() +``` + +### Testcontainers Pattern + +Integration tests use Testcontainers to create isolated PostgreSQL instances: + +```go +// Each test suite gets a fresh database +container := testcontainers.PostgreSQL() +defer container.Terminate() +``` + +This ensures: +- No state leakage between tests +- Tests can run in parallel +- No external database dependency + +### Database Issues During Testing + +If integration tests fail with PostgreSQL-related errors (missing columns, transaction issues), recreate the database: + +```bash +# From project root directory +make db/teardown # Stop and remove PostgreSQL container +make db/setup # Start fresh PostgreSQL container +./hyperfleet-api migrate # Apply migrations +make test-integration # Run tests again +``` + +**Note:** Always run `make` commands from the project root directory where the Makefile is located. + +## Common Development Tasks + +### Debugging Database Issues + +```bash +# Connect to database +make db/login + +# Check what GORM created +\dt # List tables +\d clusters # Describe clusters table +\d adapter_statuses # Check status table + +# Inspect data +SELECT id, name, generation FROM clusters; +SELECT owner_type, owner_id, adapter, conditions FROM adapter_statuses; +``` + +### Viewing OpenAPI Specification + +```bash +# Start server +make run-no-auth + +# View raw OpenAPI spec +curl http://localhost:8000/openapi + +# Use Swagger UI +open http://localhost:8000/openapi-ui +``` + +## Server Configuration + +The server is configured in cmd/hyperfleet/server/: + +**Ports**: +- `8000` - Main API server +- `8080` - Metrics endpoint +- `8083` - Health check endpoint + +**Middleware Chain**: +1. Request logging +2. Operation ID injection +3. JWT authentication (if enabled) +4. Database transaction creation +5. Route handler + +**Implementation**: See cmd/hyperfleet/server/server.go:19 + +## Common Pitfalls + +### 1. Forgetting to Run Migrations + +**Symptom**: Server starts but endpoints return errors about missing tables + +**Solution**: Always run `./hyperfleet-api migrate` after pulling code or changing schemas + +### 2. Using Wrong OpenAPI File + +**Problem**: There are two openapi.yaml files: +- `openapi/openapi.yaml` (32KB, source, has $ref) +- `pkg/api/openapi/api/openapi.yaml` (44KB, generated, fully resolved) + +**Rule**: Only edit the source file. The generated file is overwritten by `make generate`. + +### 3. Context Session Access + +**Wrong**: +```go +db := gorm.Open(...) // Creates new connection +``` + +**Right**: +```go +db := db.NewContext(ctx) // Gets session from middleware +``` + +Always use the context-based session to participate in the HTTP request transaction. + +### 4. Status Phase Calculation + +The API automatically calculates status.phase from adapter conditions. Don't set phase manually - it will be overwritten. + +## Performance Considerations + +### Database Indexes + +Ensure indexes exist for common queries: +```sql +CREATE INDEX idx_clusters_name ON clusters(name); +CREATE INDEX idx_adapter_statuses_owner ON adapter_statuses(owner_type, owner_id); +CREATE INDEX idx_labels_owner ON labels(owner_type, owner_id); +``` + +### JSONB Queries + +Spec and conditions are stored as JSONB, enabling: +```sql +-- Query by spec field +SELECT * FROM clusters WHERE spec->>'region' = 'us-west-2'; + +-- Query by condition +SELECT * FROM adapter_statuses +WHERE conditions @> '[{"type": "Ready", "status": "True"}]'; +``` + +### Connection Pooling + +GORM manages connection pooling automatically. Configure via: +```go +db.DB().SetMaxOpenConns(100) +db.DB().SetMaxIdleConns(10) +``` + +## Deployment + +The API is designed to be stateless and horizontally scalable: + +- No in-memory state +- All data in PostgreSQL +- No event creation or message queues +- Kubernetes-ready (multiple replicas) + +**Health Check**: `GET /healthcheck` returns 200 OK when database is accessible + +**Metrics**: Prometheus metrics available at `/metrics` + +## References + +- **Architecture Documentation**: `/Users/ymsun/Documents/workspace/src/github.com/openshift-hyperfleet/architecture` +- **TypeSpec Repository**: `hyperfleet-api-spec` (API specification source) +- **GORM Documentation**: https://gorm.io/docs/ +- **OpenAPI Generator**: https://openapi-generator.tech/ +- **Testcontainers**: https://testcontainers.com/ + +## Getting Help + +Common issues and solutions: + +1. **Database connection errors**: Check `make db/setup` was run and container is running +2. **Generated code issues**: Run `make generate` to regenerate from OpenAPI spec +3. **Test failures**: Ensure PostgreSQL container is running and `OCM_ENV` is set +4. **Build errors**: Verify Go version is 1.24+ with `go version` diff --git a/Dockerfile b/Dockerfile new file mode 100755 index 0000000..cf119fa --- /dev/null +++ b/Dockerfile @@ -0,0 +1,21 @@ +FROM registry.access.redhat.com/ubi9/ubi-minimal:9.2-750.1697534106 + +RUN \ + microdnf install -y \ + util-linux \ + && \ + microdnf clean all + +COPY \ + hyperfleet-api \ + /usr/local/bin/ + +EXPOSE 8000 + +ENTRYPOINT ["/usr/local/bin/hyperfleet-api", "serve"] + +LABEL name="hyperfleet-api" \ + vendor="Red Hat" \ + version="0.0.1" \ + summary="HyperFleet API" \ + description="HyperFleet API" diff --git a/Dockerfile.openapi b/Dockerfile.openapi new file mode 100755 index 0000000..90f2cf9 --- /dev/null +++ b/Dockerfile.openapi @@ -0,0 +1,35 @@ +FROM openapitools/openapi-generator-cli:v7.16.0 + +RUN apt-get update +RUN apt-get install -y make sudo git wget + +# Install Go 1.24 +RUN wget https://go.dev/dl/go1.24.0.linux-amd64.tar.gz && \ + tar -C /usr/local -xzf go1.24.0.linux-amd64.tar.gz && \ + rm go1.24.0.linux-amd64.tar.gz + +RUN mkdir -p /local + +# Copy go.mod and go.sum first for better layer caching +COPY go.mod go.sum /local/ + +# Copy the rest of the project +COPY . /local + +ENV PATH="/uhc/bin:/usr/local/go/bin:${PATH}" +ENV GOPATH="/uhc" +ENV GOBIN /usr/local/go/bin/ +ENV CGO_ENABLED=0 + +# these git and go flags to avoid self signed certificate errors + +WORKDIR /local + +# Install go-bindata +RUN go install -a github.com/go-bindata/go-bindata/...@v3.1.2 +RUN bash /usr/local/bin/docker-entrypoint.sh generate -i /local/openapi/openapi.yaml -g go -o /local/pkg/api/openapi +RUN rm /local/pkg/api/openapi/go.mod /local/pkg/api/openapi/go.sum +RUN rm -r /local/pkg/api/openapi/test +# Run go generate +RUN go generate /local/cmd/hyperfleet-api/main.go +RUN gofmt -w /local/pkg/api/openapi diff --git a/LICENSE b/LICENSE new file mode 100755 index 0000000..1c9ac71 --- /dev/null +++ b/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2023 Red Hat + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100755 index 0000000..9d93710 --- /dev/null +++ b/Makefile @@ -0,0 +1,275 @@ +.DEFAULT_GOAL := help + +# CGO_ENABLED=0 is not FIPS compliant. large commercial vendors and FedRAMP require FIPS compliant crypto +CGO_ENABLED := 1 + +# Enable users to override the golang used to accomodate custom installations +GO ?= go + +# Version information for build metadata +version:=$(shell date +%s) + +# a tool for managing containers and images, etc. You can set it as docker +container_tool ?= podman + +# Database connection details +db_name:=hyperfleet +db_host=hyperfleet-db.$(namespace) +db_port=5432 +db_user:=hyperfleet +db_password:=foobar-bizz-buzz +db_password_file=${PWD}/secrets/db.password +db_sslmode:=disable +db_image?=docker.io/library/postgres:14.2 + +# Log verbosity level +glog_v:=10 + +# Location of the JSON web key set used to verify tokens: +jwks_url:=https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/certs + +# Test output files +unit_test_json_output ?= ${PWD}/unit-test-results.json +integration_test_json_output ?= ${PWD}/integration-test-results.json + +# Prints a list of useful targets. +help: + @echo "" + @echo "HyperFleet API - Cluster Lifecycle Management Service" + @echo "" + @echo "make verify verify source code" + @echo "make lint run golangci-lint" + @echo "make binary compile binaries" + @echo "make install compile binaries and install in GOPATH bin" + @echo "make secrets initialize secrets directory with default values" + @echo "make run run the application" + @echo "make run/docs run swagger and host the api spec" + @echo "make test run unit tests" + @echo "make test-integration run integration tests" + @echo "make generate generate openapi modules" + @echo "make clean delete temporary generated files" + @echo "$(fake)" +.PHONY: help + +# Encourage consistent tool versions +OPENAPI_GENERATOR_VERSION:=5.4.0 +GO_VERSION:=go1.24. + +### Constants: +version:=$(shell date +%s) +GOLANGCI_LINT_BIN:=$(shell go env GOPATH)/bin/golangci-lint + +# Version information for ldflags +git_sha:=$(shell git rev-parse --short HEAD 2>/dev/null || echo "unknown") +git_dirty:=$(shell git diff --quiet 2>/dev/null || echo "-modified") +build_version:=$(git_sha)$(git_dirty) +build_time:=$(shell date -u '+%Y-%m-%d %H:%M:%S UTC') +ldflags=-X github.com/openshift-hyperfleet/hyperfleet-api/pkg/api.Version=$(build_version) -X 'github.com/openshift-hyperfleet/hyperfleet-api/pkg/api.BuildTime=$(build_time)' + +### Envrionment-sourced variables with defaults +# Can be overriden by setting environment var before running +# Example: +# OCM_ENV=unit_testing make run +# export OCM_ENV=testing; make run +# Set the environment to development by default +ifndef OCM_ENV + OCM_ENV:=development +endif + +ifndef TEST_SUMMARY_FORMAT + TEST_SUMMARY_FORMAT=short-verbose +endif + +ifndef OCM_BASE_URL + OCM_BASE_URL:="https://api.integration.openshift.com" +endif + +# Checks if a GOPATH is set, or emits an error message +check-gopath: +ifndef GOPATH + $(error GOPATH is not set) +endif +.PHONY: check-gopath + +# Verifies that source passes standard checks. +verify: check-gopath + ${GO} vet \ + ./cmd/... \ + ./pkg/... + ! gofmt -l cmd pkg test |\ + sed 's/^/Unformatted file: /' |\ + grep . + @ ${GO} version | grep -q "$(GO_VERSION)" || \ + ( \ + printf '\033[41m\033[97m\n'; \ + echo "* Your go version is not the expected $(GO_VERSION) *" | sed 's/./*/g'; \ + echo "* Your go version is not the expected $(GO_VERSION) *"; \ + echo "* Your go version is not the expected $(GO_VERSION) *" | sed 's/./*/g'; \ + printf '\033[0m'; \ + ) +.PHONY: verify + +# Runs our linter to verify that everything is following best practices +# Requires golangci-lint to be installed @ $(go env GOPATH)/bin/golangci-lint +# Linter is set to ignore `unused` stuff due to example being incomplete by definition +lint: + $(GOLANGCI_LINT_BIN) run -e unused \ + ./cmd/... \ + ./pkg/... +.PHONY: lint + +# Build binaries +# NOTE it may be necessary to use CGO_ENABLED=0 for backwards compatibility with centos7 if not using centos7 +binary: check-gopath + echo "Building version: ${build_version}" + ${GO} build -ldflags="$(ldflags)" -o hyperfleet-api ./cmd/hyperfleet-api +.PHONY: binary + +# Install +install: check-gopath + CGO_ENABLED=$(CGO_ENABLED) GOEXPERIMENT=boringcrypto ${GO} install -ldflags="$(ldflags)" ./cmd/hyperfleet-api + @ ${GO} version | grep -q "$(GO_VERSION)" || \ + ( \ + printf '\033[41m\033[97m\n'; \ + echo "* Your go version is not the expected $(GO_VERSION) *" | sed 's/./*/g'; \ + echo "* Your go version is not the expected $(GO_VERSION) *"; \ + echo "* Your go version is not the expected $(GO_VERSION) *" | sed 's/./*/g'; \ + printf '\033[0m'; \ + ) +.PHONY: install + +# Initialize secrets directory with default values +secrets: + @mkdir -p secrets + @printf "localhost" > secrets/db.host + @printf "$(db_name)" > secrets/db.name + @printf "$(db_password)" > secrets/db.password + @printf "$(db_port)" > secrets/db.port + @printf "$(db_user)" > secrets/db.user + @printf "ocm-hyperfleet-testing" > secrets/ocm-service.clientId + @printf "your-client-secret-here" > secrets/ocm-service.clientSecret + @printf "your-token-here" > secrets/ocm-service.token + @echo "Secrets directory initialized with default values" +.PHONY: secrets + +# Runs the unit tests. +# +# Args: +# TESTFLAGS: Flags to pass to `go test`. The `-v` argument is always passed. +# +# Examples: +# make test TESTFLAGS="-run TestSomething" +test: install secrets + OCM_ENV=unit_testing gotestsum --format short-verbose -- -p 1 -v $(TESTFLAGS) \ + ./pkg/... \ + ./cmd/... +.PHONY: test + +# Runs the unit tests with json output +# +# Args: +# TESTFLAGS: Flags to pass to `go test`. The `-v` argument is always passed. +# +# Examples: +# make test-unit-json TESTFLAGS="-run TestSomething" +ci-test-unit: install secrets + OCM_ENV=unit_testing gotestsum --jsonfile-timing-events=$(unit_test_json_output) --format short-verbose -- -p 1 -v $(TESTFLAGS) \ + ./pkg/... \ + ./cmd/... +.PHONY: ci-test-unit + +# Runs the integration tests. +# +# Args: +# TESTFLAGS: Flags to pass to `go test`. The `-v` argument is always passed. +# +# Example: +# make test-integration +# make test-integration TESTFLAGS="-run TestAccounts" acts as TestAccounts* and run TestAccountsGet, TestAccountsPost, etc. +# make test-integration TESTFLAGS="-run TestAccountsGet" runs TestAccountsGet +# make test-integration TESTFLAGS="-short" skips long-run tests +ci-test-integration: install secrets + TESTCONTAINERS_RYUK_DISABLED=true OCM_ENV=integration_testing gotestsum --jsonfile-timing-events=$(integration_test_json_output) --format $(TEST_SUMMARY_FORMAT) -- -p 1 -ldflags -s -v -timeout 1h $(TESTFLAGS) \ + ./test/integration +.PHONY: ci-test-integration + +# Runs the integration tests. +# +# Args: +# TESTFLAGS: Flags to pass to `go test`. The `-v` argument is always passed. +# +# Example: +# make test-integration +# make test-integration TESTFLAGS="-run TestAccounts" acts as TestAccounts* and run TestAccountsGet, TestAccountsPost, etc. +# make test-integration TESTFLAGS="-run TestAccountsGet" runs TestAccountsGet +# make test-integration TESTFLAGS="-short" skips long-run tests +test-integration: install secrets + TESTCONTAINERS_RYUK_DISABLED=true OCM_ENV=integration_testing gotestsum --format $(TEST_SUMMARY_FORMAT) -- -p 1 -ldflags -s -v -timeout 1h $(TESTFLAGS) \ + ./test/integration +.PHONY: test-integration + +# Regenerate openapi client and models +generate: + rm -rf pkg/api/openapi + $(container_tool) build -t hyperfleet-openapi -f Dockerfile.openapi . + $(eval OPENAPI_IMAGE_ID=`$(container_tool) create -t hyperfleet-openapi -f Dockerfile.openapi .`) + $(container_tool) cp $(OPENAPI_IMAGE_ID):/local/pkg/api/openapi ./pkg/api/openapi + $(container_tool) cp $(OPENAPI_IMAGE_ID):/local/data/generated/openapi/openapi.go ./data/generated/openapi/openapi.go +.PHONY: generate + +# Regenerate openapi client and models using vendor (avoids downloading dependencies) +generate-vendor: + rm -rf pkg/api/openapi + $(container_tool) build -t hyperfleet-openapi-vendor -f Dockerfile.openapi.vendor . + $(eval OPENAPI_IMAGE_ID=`$(container_tool) create -t hyperfleet-openapi-vendor -f Dockerfile.openapi.vendor .`) + $(container_tool) cp $(OPENAPI_IMAGE_ID):/local/pkg/api/openapi ./pkg/api/openapi + $(container_tool) cp $(OPENAPI_IMAGE_ID):/local/data/generated/openapi/openapi.go ./data/generated/openapi/openapi.go +.PHONY: generate-vendor + +run: binary + ./hyperfleet-api migrate + ./hyperfleet-api serve +.PHONY: run + +run-no-auth: binary + ./hyperfleet-api migrate + ./hyperfleet-api serve --enable-authz=false --enable-jwt=false + +# Run Swagger nd host the api docs +run/docs: + @echo "Please open http://localhost/" + docker run -d -p 80:8080 -e SWAGGER_JSON=/hyperfleet.yaml -v $(PWD)/openapi/hyperfleet.yaml:/hyperfleet.yaml swaggerapi/swagger-ui +.PHONY: run/docs + +# Delete temporary files +clean: + rm -rf \ + $(binary) \ + data/generated/openapi/*.json \ + secrets \ +.PHONY: clean + +.PHONY: cmds +cmds: + for cmd in $$(ls cmd); do \ + CGO_ENABLED=$(CGO_ENABLED) ${GO} build \ + -ldflags="$(ldflags)" \ + -o "$${cmd}" \ + "./cmd/$${cmd}" \ + || exit 1; \ + done + + +.PHONY: db/setup +db/setup: secrets + @echo $(db_password) > $(db_password_file) + $(container_tool) run --name psql-hyperfleet -e POSTGRES_DB=$(db_name) -e POSTGRES_USER=$(db_user) -e POSTGRES_PASSWORD=$(db_password) -p $(db_port):5432 -d $(db_image) + +.PHONY: db/login +db/login: + $(container_tool) exec -it psql-hyperfleet bash -c "psql -h localhost -U $(db_user) $(db_name)" + +.PHONY: db/teardown +db/teardown: + $(container_tool) stop psql-hyperfleet + $(container_tool) rm psql-hyperfleet diff --git a/PREREQUISITES.md b/PREREQUISITES.md new file mode 100755 index 0000000..fe7aa08 --- /dev/null +++ b/PREREQUISITES.md @@ -0,0 +1,96 @@ +# Prerequisites + +hyperfleet-api requires the following tools to be pre-installed: + +## Go + +`Go` is an open-source programming language that makes it easy to build simple, reliable, and efficient software. + +- **Purpose**: Required for building and running the `hyperfleet` binary +- **Version**: Go 1.24 or higher (FIPS-compliant crypto support) +- **Installation**: Install Go from the [official Go website](https://golang.org/dl/) +- **Verification**: Run `go version` to verify installation + +## Podman + +`Podman` is a daemonless container engine for developing, managing, and running OCI containers. + +- **Purpose**: Used for running PostgreSQL database locally and for code generation (openapi-generator-cli) +- **Installation**: + - Podman: [https://podman.io/getting-started/installation](https://podman.io/getting-started/installation) + +## PostgreSQL Client Tools + +PostgreSQL client tools provide the `psql` command-line interface for database interaction. + +- **Purpose**: Required for `make db/login` to connect to the database and inspect schema +- **Installation**: + - macOS: `brew install postgresql` + - Ubuntu: `apt-get install postgresql-client` + - Fedora: `dnf install postgresql` +- **Note**: The PostgreSQL server itself runs in a container via `make db/setup` + +## gotestsum + +`gotestsum` is a Go test runner with enhanced output formatting. + +- **Purpose**: Required for running tests with formatted output +- **Installation**: `go install gotest.tools/gotestsum@latest` +- **Verification**: Run `gotestsum --version` + +## jq + +`jq` is a lightweight and flexible command-line JSON processor. + +- **Purpose**: Useful for parsing JSON outputs from API calls and commands +- **Installation**: Follow the instructions on the [jq official website](https://jqlang.github.io/jq/) +- **Verification**: Run `jq --version` + +## ocm CLI (Optional) + +`ocm` stands for OpenShift Cluster Manager CLI and is used for authentication in production mode. + +- **Purpose**: CLI tool for authenticating with OCM and making authenticated API requests +- **Installation**: Refer to the [OCM CLI documentation](https://github.com/openshift-online/ocm-cli) +- **Note**: Only required when running with authentication enabled (production mode) +- **Development**: For local development, use `make run-no-auth` which bypasses authentication + +## Quick Verification + +Run these commands to verify all prerequisites are installed: + +```bash +# Required tools +go version # Should show 1.24 or higher +podman --version +psql --version # PostgreSQL client +gotestsum --version # Test runner +jq --version # JSON processor + +# Optional tools +ocm version # OCM CLI (production auth only) +``` + +## Getting Started + +Once all prerequisites are installed, follow the development workflow in README.md: + +```bash +# Generate OpenAPI code (required before go mod download) +make generate + +# Install Go dependencies +go mod download +go install gotest.tools/gotestsum@latest + +# Initialize configuration +make secrets + +# Start database +make db/setup + +# Build and run +make binary +./hyperfleet-api migrate +make run-no-auth +``` diff --git a/README.md b/README.md old mode 100644 new mode 100755 index ca67b96..873f87d --- a/README.md +++ b/README.md @@ -1,18 +1,507 @@ -# hyperfleet-api +# HyperFleet API -HyperFleet API - Simple REST API for cluster lifecycle management. Provides CRUD operations for clusters and status sub-resources. Pure data layer with PostgreSQL integration - no business logic or event creation. Stateless design enables horizontal scaling. Part of HyperFleet v2 event-driven architecture. +HyperFleet API - Simple REST API for cluster lifecycle management. Provides CRUD operations for clusters and status sub-resources. Pure data layer with PostgreSQL integration - no business logic or event creation. Stateless design enables horizontal scaling. -## Repository Access +![HyperFleet](rhtap-hyperfleet_sm.png) -All members of the **hyperfleet** team have write access to this repository. +## Architecture -### Steps to Apply for Repository Access +### Technology Stack -If you're a team member and need access to this repository: +- **Language**: Go 1.24.9 +- **API Definition**: TypeSpec → OpenAPI 3.0.3 +- **Code Generation**: openapi-generator-cli v7.16.0 +- **Database**: PostgreSQL with GORM ORM +- **Container Runtime**: Podman +- **Testing**: Gomega + Resty -1. **Verify Organization Membership**: Ensure you're a member of the `openshift-hyperfleet` organization -2. **Check Team Assignment**: Confirm you're added to the hyperfleet team within the organization -3. **Repository Permissions**: All hyperfleet team members automatically receive write access -4. **OWNERS File**: Code reviews and approvals are managed through the OWNERS file +### Core Features -For access issues, contact a repository administrator or organization owner. +* TypeSpec-based API specification +* OpenAPI 3.0 code generation workflow +* Cluster and NodePool lifecycle management +* Adapter-based status reporting with Kubernetes-style conditions +* Pagination and search capabilities +* Complete integration test coverage +* Database migrations with GORM +* Embedded OpenAPI specification using `//go:embed` + +## Project Structure + +``` +hyperfleet-api/ +├── cmd/hyperfleet/ # Application entry point +├── pkg/ +│ ├── api/ # API models and handlers +│ │ ├── openapi/ # Generated Go models from OpenAPI +│ │ │ ├── api/ # Embedded OpenAPI specification +│ │ │ └── model_*.go # Generated model structs +│ │ └── openapi_embed.go # Go embed directive +│ ├── dao/ # Data access layer +│ ├── db/ # Database setup and migrations +│ ├── handlers/ # HTTP request handlers +│ ├── services/ # Business logic +│ └── server/ # Server configuration +├── openapi/ # API specification source +│ └── openapi.yaml # TypeSpec-generated OpenAPI spec (32KB) +├── test/ +│ ├── integration/ # Integration tests +│ └── factories/ # Test data factories +└── Makefile # Build automation +``` + +## API Resources + +### Cluster Management + +Cluster resources represent Kubernetes clusters managed across different cloud providers. + +**Endpoints:** +``` +GET /api/hyperfleet/v1/clusters +POST /api/hyperfleet/v1/clusters +GET /api/hyperfleet/v1/clusters/{cluster_id} +GET /api/hyperfleet/v1/clusters/{cluster_id}/statuses +POST /api/hyperfleet/v1/clusters/{cluster_id}/statuses +``` + +**Data Model:** +```json +{ + "kind": "Cluster", + "id": "string", + "name": "string", + "generation": 1, + "spec": { + "region": "us-west-2", + "version": "4.15", + "nodes": 3 + }, + "labels": { + "env": "production" + }, + "status": { + "phase": "Ready", + "observed_generation": 1, + "adapters": [...] + } +} +``` + +**Status Phases:** +- `NotReady` - Cluster is being provisioned or has failing conditions +- `Ready` - All adapter conditions report success +- `Failed` - Cluster provisioning or operation failed + +### NodePool Management + +NodePool resources represent groups of compute nodes within a cluster. + +**Endpoints:** +``` +GET /api/hyperfleet/v1/nodepools +GET /api/hyperfleet/v1/clusters/{cluster_id}/nodepools +POST /api/hyperfleet/v1/clusters/{cluster_id}/nodepools +GET /api/hyperfleet/v1/clusters/{cluster_id}/nodepools/{nodepool_id} +GET /api/hyperfleet/v1/clusters/{cluster_id}/nodepools/{nodepool_id}/statuses +POST /api/hyperfleet/v1/clusters/{cluster_id}/nodepools/{nodepool_id}/statuses +``` + +**Data Model:** +```json +{ + "kind": "NodePool", + "id": "string", + "name": "string", + "owner_references": { + "kind": "Cluster", + "id": "cluster_id" + }, + "spec": { + "instance_type": "m5.2xlarge", + "replicas": 3, + "disk_size": 120 + }, + "labels": {}, + "status": { + "phase": "Ready", + "adapters": [...] + } +} +``` + +### Adapter Status Pattern + +Resources report status through adapter-specific condition sets following Kubernetes conventions. + +**Structure:** +```json +{ + "adapter": "dns-adapter", + "observed_generation": 1, + "conditions": [ + { + "adapter": "dns-adapter", + "type": "Ready", + "status": "True", + "observed_generation": 1, + "reason": "ClusterProvisioned", + "message": "Cluster successfully provisioned", + "created_at": "2025-11-17T15:04:05Z", + "updated_at": "2025-11-17T15:04:05Z", + } + ], + "data": {} +} +``` + +**Note**: The `created_at` and `updated_at` fields in conditions are optional and typically set by the service. + +**Condition Types:** +- `Ready` - Resource is operational +- `Available` - Resource is available for use +- `Progressing` - Resource is being modified +- Custom types defined by adapters + +### List Response Pattern + +All list endpoints return consistent pagination metadata: + +```json +{ + "kind": "ClusterList", + "page": 1, + "size": 10, + "total": 100, + "items": [...] +} +``` + +**Pagination Parameters:** +- `?page=N` - Page number (default: 1) +- `?pageSize=N` - Items per page (default: 100) + +**Search Parameters (clusters only):** +- `?search=name='cluster-name'` - Filter by name + +## Development Workflow + +### Prerequisites + +Before running hyperfleet-api, ensure these prerequisites are installed. See [PREREQUISITES.md](./PREREQUISITES.md) for details. + +- Go 1.24 or higher +- Podman +- PostgreSQL 13+ +- Make + +### Initial Setup + +```bash +# 1. Generate OpenAPI code (must run first as pkg/api/openapi is required by go.mod) +make generate + +# 2. Install dependencies +go install gotest.tools/gotestsum@latest +go mod download + +# 3. Build the binary +make binary + +# 4. Setup PostgreSQL database +make db/setup + +# 5. Run database migrations +./hyperfleet-api migrate + +# 6. Verify database schema +make db/login +psql -h localhost -U hyperfleet hyperfleet +\dt +``` + +**Note**: The `pkg/api/openapi/` directory is not tracked in git. You must run `make generate` after cloning or pulling changes to the OpenAPI specification. + +### Running the Service + +**Local development (no authentication):** +```bash +make run-no-auth +``` + +The service starts on `localhost:8000`: +- REST API: `http://localhost:8000/api/hyperfleet/v1/` +- OpenAPI spec: `http://localhost:8000/openapi` +- Swagger UI: `http://localhost:8000/openapi-ui` +- Health check: `http://localhost:8083/healthcheck` +- Metrics: `http://localhost:8080/metrics` + +**Test the API:** +```bash +# Check API compatibility +curl http://localhost:8000/api/hyperfleet/v1/compatibility | jq + +# List clusters +curl http://localhost:8000/api/hyperfleet/v1/clusters | jq + +# Create a cluster +curl -X POST http://localhost:8000/api/hyperfleet/v1/clusters \ + -H "Content-Type: application/json" \ + -d '{ + "kind": "Cluster", + "name": "prod-cluster-1", + "spec": { + "region": "us-west-2", + "version": "4.15", + "nodes": 3 + }, + "labels": { + "env": "production" + } + }' | jq +``` + +### Testing + +```bash +# Unit tests +make test + +# Integration tests (requires running database) +make test-integration +``` + +**Test Coverage:** + +All 12 API endpoints have integration test coverage: + +| Endpoint | Coverage | +|----------|----------| +| GET /compatibility | ✓ | +| GET /clusters | ✓ (list, pagination, search) | +| POST /clusters | ✓ | +| GET /clusters/{id} | ✓ | +| GET /clusters/{id}/statuses | ✓ | +| POST /clusters/{id}/statuses | ✓ | +| GET /nodepools | ✓ (list, pagination) | +| GET /clusters/{id}/nodepools | ✓ | +| POST /clusters/{id}/nodepools | ✓ | +| GET /clusters/{id}/nodepools/{nodepool_id} | ✓ | +| GET /clusters/{id}/nodepools/{nodepool_id}/statuses | ✓ | +| POST /clusters/{id}/nodepools/{nodepool_id}/statuses | ✓ | + +## Code Generation Workflow + +### TypeSpec to OpenAPI + +The API specification is defined using TypeSpec and compiled to OpenAPI 3.0 from [hyperfleet-api-spec](https://github.com/openshift-hyperfleet/hyperfleet-api-spec): + +``` +TypeSpec definitions (.tsp files) + ↓ +tsp compile + ↓ +openapi/openapi.yaml (32KB, source specification) +``` + +### OpenAPI to Go Models + +Generated Go code is created via Docker-based workflow: + +``` +openapi/openapi.yaml + ↓ +make generate (podman + openapi-generator-cli v7.16.0) + ↓ +pkg/api/openapi/model_*.go (Go model structs) +pkg/api/openapi/api/openapi.yaml (44KB, fully resolved spec) +``` + +**Generation process:** +1. `make generate` removes existing generated code +2. Builds Docker image with openapi-generator-cli +3. Runs code generator inside container +4. Copies generated files to host + +**Generated artifacts:** +- Model structs with JSON tags +- Type definitions for all API resources +- Validation tags for required fields +- Fully resolved OpenAPI specification + +**Important**: Generated files in `pkg/api/openapi/` are not tracked in git. Developers must run `make generate` after cloning or pulling changes to the OpenAPI specification. + +### Runtime Embedding + +The fully resolved OpenAPI specification is embedded at compile time using Go 1.16+ `//go:embed`: + +```go +// pkg/api/openapi_embed.go +//go:embed openapi/api/openapi.yaml +var openapiFS embed.FS + +func GetOpenAPISpec() ([]byte, error) { + return fs.ReadFile(openapiFS, "openapi/api/openapi.yaml") +} +``` + +This embedded specification is: +- Compiled into the binary +- Served at `/openapi` endpoint +- Used by Swagger UI at `/openapi-ui` +- Zero runtime file I/O required + +## Database Schema + +### Core Tables + +**clusters** +- Primary resources for cluster management +- Includes spec (region, version, nodes) +- Stores metadata (labels, generation) +- Tracks created_by, updated_by + +**node_pools** +- Child resources owned by clusters +- Contains spec (instance_type, replicas, disk_size) +- Maintains owner_id foreign key to clusters +- Soft delete support + +**adapter_statuses** +- Polymorphic status records +- owner_type: 'Cluster' or 'NodePool' +- owner_id: References clusters or node_pools +- Stores adapter name and conditions JSON +- Tracks observed_generation + +**labels** +- Key-value pairs for resource categorization +- owner_type and owner_id for polymorphic relationships +- Supports filtering and search + +## OpenAPI Specification Structure + +**Source file (`openapi/openapi.yaml` - 32KB):** +- TypeSpec compilation output +- Uses `$ref` for parameter reuse (78 references) +- Compact, maintainable structure +- Input for code generation + +**Generated file (`pkg/api/openapi/api/openapi.yaml` - 44KB):** +- openapi-generator output +- Fully resolved (no external `$ref`) +- Inline parameter definitions (54 references) +- Includes server configuration +- Embedded in Go binary + +**Key differences:** +- Source file: Optimized for maintainability +- Generated file: Optimized for runtime serving + +## Build Commands + +```bash +# Generate OpenAPI client code +make generate + +# Build binary +make binary + +# Run database migrations +./hyperfleet-api migrate + +# Start server (no auth) +make run-no-auth + +# Run tests +make test +make test-integration + +# Database management +make db/setup # Create PostgreSQL container +make db/teardown # Remove PostgreSQL container +make db/login # Connect to database shell +``` + +## API Authentication + +**Development mode (no auth):** +```bash +make run-no-auth +curl http://localhost:8000/api/hyperfleet/v1/clusters +``` + +**Production mode (OCM auth):** +```bash +make run +ocm login --token=${OCM_ACCESS_TOKEN} --url=http://localhost:8000 +ocm get /api/hyperfleet/v1/clusters +``` + +## Example Usage + +### Create Cluster and NodePool + +```bash +# 1. Create cluster +CLUSTER=$(curl -s -X POST http://localhost:8000/api/hyperfleet/v1/clusters \ + -H "Content-Type: application/json" \ + -d '{ + "kind": "Cluster", + "name": "production-cluster", + "spec": { + "region": "us-east-1", + "version": "4.16", + "nodes": 5 + }, + "labels": { + "env": "production", + "team": "platform" + } + }') + +CLUSTER_ID=$(echo $CLUSTER | jq -r '.id') + +# 2. Create node pool +curl -X POST http://localhost:8000/api/hyperfleet/v1/clusters/$CLUSTER_ID/nodepools \ + -H "Content-Type: application/json" \ + -d '{ + "kind": "NodePool", + "name": "worker-pool", + "spec": { + "instance_type": "m5.2xlarge", + "replicas": 10, + "disk_size": 200 + }, + "labels": { + "pool_type": "worker" + } + }' | jq + +# 3. Report adapter status +curl -X POST http://localhost:8000/api/hyperfleet/v1/clusters/$CLUSTER_ID/statuses \ + -H "Content-Type: application/json" \ + -d '{ + "adapter": "dns-adapter", + "observed_generation": 1, + "conditions": [ + { + "adapter": "dns-adapter", + "type": "Ready", + "status": "True", + "observed_generation": 1, + "reason": "ClusterProvisioned", + "message": "Cluster successfully provisioned", + "created_at": "2025-11-17T15:04:05Z", + "updated_at": "2025-11-17T15:04:05Z" + } + ] + }' | jq + +# 4. Get cluster with aggregated status +curl http://localhost:8000/api/hyperfleet/v1/clusters/$CLUSTER_ID | jq +``` + +## License + +[License information to be added] diff --git a/cmd/hyperfleet-api/environments/e_development.go b/cmd/hyperfleet-api/environments/e_development.go new file mode 100755 index 0000000..6aaa7a1 --- /dev/null +++ b/cmd/hyperfleet-api/environments/e_development.go @@ -0,0 +1,49 @@ +package environments + +import ( + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/db_session" +) + +// devEnvImpl environment is intended for local use while developing features +type devEnvImpl struct { + env *Env +} + +var _ EnvironmentImpl = &devEnvImpl{} + +func (e *devEnvImpl) OverrideDatabase(c *Database) error { + c.SessionFactory = db_session.NewProdFactory(e.env.Config.Database) + return nil +} + +func (e *devEnvImpl) OverrideConfig(c *config.ApplicationConfig) error { + c.Server.EnableJWT = false + c.Server.EnableHTTPS = false + return nil +} + +func (e *devEnvImpl) OverrideServices(s *Services) error { + return nil +} + +func (e *devEnvImpl) OverrideHandlers(h *Handlers) error { + return nil +} + +func (e *devEnvImpl) OverrideClients(c *Clients) error { + return nil +} + +func (e *devEnvImpl) Flags() map[string]string { + return map[string]string{ + "v": "10", + "enable-authz": "false", + "ocm-debug": "false", + "enable-ocm-mock": "true", + "enable-https": "false", + "enable-metrics-https": "false", + "api-server-hostname": "localhost", + "api-server-bindaddress": "localhost:8000", + } +} diff --git a/cmd/hyperfleet-api/environments/e_integration_testing.go b/cmd/hyperfleet-api/environments/e_integration_testing.go new file mode 100755 index 0000000..8db7ea0 --- /dev/null +++ b/cmd/hyperfleet-api/environments/e_integration_testing.go @@ -0,0 +1,53 @@ +package environments + +import ( + "os" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/db_session" +) + +var _ EnvironmentImpl = &integrationTestingEnvImpl{} + +// integrationTestingEnvImpl is configuration for integration tests using testcontainers +type integrationTestingEnvImpl struct { + env *Env +} + +func (e *integrationTestingEnvImpl) OverrideDatabase(c *Database) error { + c.SessionFactory = db_session.NewTestcontainerFactory(e.env.Config.Database) + return nil +} + +func (e *integrationTestingEnvImpl) OverrideConfig(c *config.ApplicationConfig) error { + // Support a one-off env to allow enabling db debug in testing + if os.Getenv("DB_DEBUG") == "true" { + c.Database.Debug = true + } + return nil +} + +func (e *integrationTestingEnvImpl) OverrideServices(s *Services) error { + return nil +} + +func (e *integrationTestingEnvImpl) OverrideHandlers(h *Handlers) error { + return nil +} + +func (e *integrationTestingEnvImpl) OverrideClients(c *Clients) error { + return nil +} + +func (e *integrationTestingEnvImpl) Flags() map[string]string { + return map[string]string{ + "v": "0", + "logtostderr": "true", + "ocm-base-url": "https://api.integration.openshift.com", + "enable-https": "false", + "enable-metrics-https": "false", + "enable-authz": "true", + "ocm-debug": "false", + "enable-ocm-mock": "true", + } +} diff --git a/cmd/hyperfleet-api/environments/e_production.go b/cmd/hyperfleet-api/environments/e_production.go new file mode 100755 index 0000000..05151e2 --- /dev/null +++ b/cmd/hyperfleet-api/environments/e_production.go @@ -0,0 +1,42 @@ +package environments + +import ( + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/db_session" +) + +var _ EnvironmentImpl = &productionEnvImpl{} + +// productionEnvImpl is any deployed instance of the service through app-interface +type productionEnvImpl struct { + env *Env +} + +func (e *productionEnvImpl) OverrideDatabase(c *Database) error { + c.SessionFactory = db_session.NewProdFactory(e.env.Config.Database) + return nil +} + +func (e *productionEnvImpl) OverrideConfig(c *config.ApplicationConfig) error { + return nil +} + +func (e *productionEnvImpl) OverrideServices(s *Services) error { + return nil +} + +func (e *productionEnvImpl) OverrideHandlers(h *Handlers) error { + return nil +} + +func (e *productionEnvImpl) OverrideClients(c *Clients) error { + return nil +} + +func (e *productionEnvImpl) Flags() map[string]string { + return map[string]string{ + "v": "1", + "ocm-debug": "false", + "enable-ocm-mock": "false", + } +} diff --git a/cmd/hyperfleet-api/environments/e_unit_testing.go b/cmd/hyperfleet-api/environments/e_unit_testing.go new file mode 100755 index 0000000..81127e3 --- /dev/null +++ b/cmd/hyperfleet-api/environments/e_unit_testing.go @@ -0,0 +1,53 @@ +package environments + +import ( + "os" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" + dbmocks "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/mocks" +) + +var _ EnvironmentImpl = &unitTestingEnvImpl{} + +// unitTestingEnvImpl is configuration for unit tests using mocked database +type unitTestingEnvImpl struct { + env *Env +} + +func (e *unitTestingEnvImpl) OverrideDatabase(c *Database) error { + c.SessionFactory = dbmocks.NewMockSessionFactory() + return nil +} + +func (e *unitTestingEnvImpl) OverrideConfig(c *config.ApplicationConfig) error { + // Support a one-off env to allow enabling db debug in testing + if os.Getenv("DB_DEBUG") == "true" { + c.Database.Debug = true + } + return nil +} + +func (e *unitTestingEnvImpl) OverrideServices(s *Services) error { + return nil +} + +func (e *unitTestingEnvImpl) OverrideHandlers(h *Handlers) error { + return nil +} + +func (e *unitTestingEnvImpl) OverrideClients(c *Clients) error { + return nil +} + +func (e *unitTestingEnvImpl) Flags() map[string]string { + return map[string]string{ + "v": "0", + "logtostderr": "true", + "ocm-base-url": "https://api.integration.openshift.com", + "enable-https": "false", + "enable-metrics-https": "false", + "enable-authz": "true", + "ocm-debug": "false", + "enable-ocm-mock": "true", + } +} diff --git a/cmd/hyperfleet-api/environments/framework.go b/cmd/hyperfleet-api/environments/framework.go new file mode 100755 index 0000000..4ae23c6 --- /dev/null +++ b/cmd/hyperfleet-api/environments/framework.go @@ -0,0 +1,169 @@ +package environments + +import ( + "os" + "strings" + + "github.com/golang/glog" + "github.com/spf13/pflag" + + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments/registry" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/client/ocm" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +func init() { + once.Do(func() { + environment = &Env{} + + // Create the configuration + environment.Config = config.NewApplicationConfig() + environment.Name = GetEnvironmentStrFromEnv() + + environments = map[string]EnvironmentImpl{ + DevelopmentEnv: &devEnvImpl{environment}, + UnitTestingEnv: &unitTestingEnvImpl{environment}, + IntegrationTestingEnv: &integrationTestingEnvImpl{environment}, + ProductionEnv: &productionEnvImpl{environment}, + } + }) +} + +// EnvironmentImpl defines a set of behaviors for an OCM environment. +// Each environment provides a set of flags for basic set/override of the environment +// and configuration functions for each component type. +type EnvironmentImpl interface { + Flags() map[string]string + OverrideConfig(c *config.ApplicationConfig) error + OverrideServices(s *Services) error + OverrideDatabase(s *Database) error + OverrideHandlers(c *Handlers) error + OverrideClients(c *Clients) error +} + +func GetEnvironmentStrFromEnv() string { + envStr, specified := os.LookupEnv(EnvironmentStringKey) + if !specified || envStr == "" { + envStr = EnvironmentDefault + } + return envStr +} + +func Environment() *Env { + return environment +} + +// AddFlags Adds environment flags, using the environment's config struct, to the flagset 'flags' +func (e *Env) AddFlags(flags *pflag.FlagSet) error { + e.Config.AddFlags(flags) + return setConfigDefaults(flags, environments[e.Name].Flags()) +} + +// Initialize loads the environment's resources +// This should be called after the e.Config has been set appropriately though AddFlags and pasing, done elsewhere +// The environment does NOT handle flag parsing +func (e *Env) Initialize() error { + glog.Infof("Initializing %s environment", e.Name) + + envImpl, found := environments[e.Name] + if !found { + glog.Fatalf("Unknown runtime environment: %s", e.Name) + } + + if err := envImpl.OverrideConfig(e.Config); err != nil { + glog.Fatalf("Failed to configure ApplicationConfig: %s", err) + } + + messages := environment.Config.ReadFiles() + if len(messages) != 0 { + glog.Fatalf("unable to read configuration files:\n%s", strings.Join(messages, "\n")) + } + + // each env will set db explicitly because the DB impl has a `once` init section + if err := envImpl.OverrideDatabase(&e.Database); err != nil { + glog.Fatalf("Failed to configure Database: %s", err) + } + + err := e.LoadClients() + if err != nil { + return err + } + if err := envImpl.OverrideClients(&e.Clients); err != nil { + glog.Fatalf("Failed to configure Clients: %s", err) + } + + e.LoadServices() + if err := envImpl.OverrideServices(&e.Services); err != nil { + glog.Fatalf("Failed to configure Services: %s", err) + } + + seedErr := e.Seed() + if seedErr != nil { + return seedErr + } + + if err := envImpl.OverrideHandlers(&e.Handlers); err != nil { + glog.Fatalf("Failed to configure Handlers: %s", err) + } + + return nil +} + +func (e *Env) Seed() *errors.ServiceError { + return nil +} + +func (e *Env) LoadServices() { + // Initialize the service registry map + e.Services.serviceRegistry = make(map[string]interface{}) + + // Auto-discovered services (no manual editing needed) + registry.LoadDiscoveredServices(&e.Services, e) +} + +func (e *Env) LoadClients() error { + var err error + + ocmConfig := ocm.Config{ + BaseURL: e.Config.OCM.BaseURL, + ClientID: e.Config.OCM.ClientID, + ClientSecret: e.Config.OCM.ClientSecret, + SelfToken: e.Config.OCM.SelfToken, + TokenURL: e.Config.OCM.TokenURL, + Debug: e.Config.OCM.Debug, + } + + // Create OCM Authz client + if e.Config.OCM.EnableMock { + glog.Infof("Using Mock OCM Authz Client") + e.Clients.OCM, err = ocm.NewClientMock(ocmConfig) + } else { + e.Clients.OCM, err = ocm.NewClient(ocmConfig) + } + if err != nil { + glog.Errorf("Unable to create OCM Authz client: %s", err.Error()) + return err + } + + return nil +} + +func (e *Env) Teardown() { + if e.Database.SessionFactory != nil { + if err := e.Database.SessionFactory.Close(); err != nil { + glog.Errorf("Error closing database session factory: %s", err.Error()) + } + } + e.Clients.OCM.Close() +} + +func setConfigDefaults(flags *pflag.FlagSet, defaults map[string]string) error { + for name, value := range defaults { + if err := flags.Set(name, value); err != nil { + glog.Errorf("Error setting flag %s: %v", name, err) + return err + } + } + return nil +} diff --git a/cmd/hyperfleet-api/environments/framework_test.go b/cmd/hyperfleet-api/environments/framework_test.go new file mode 100755 index 0000000..06c56fe --- /dev/null +++ b/cmd/hyperfleet-api/environments/framework_test.go @@ -0,0 +1,56 @@ +package environments + +import ( + "os/exec" + "reflect" + "testing" + + "github.com/spf13/pflag" +) + +func BenchmarkGetDynos(b *testing.B) { + b.ReportAllocs() + fn := func(b *testing.B) { + cmd := exec.Command("ocm", "get", "/api/hyperfleet/v1/clusters", "params='size=2'") + _, err := cmd.CombinedOutput() + if err != nil { + b.Errorf("ERROR %+v", err) + } + } + for n := 0; n < b.N; n++ { + fn(b) + } +} + +func TestLoadServices(t *testing.T) { + env := Environment() + err := env.AddFlags(pflag.CommandLine) + if err != nil { + t.Errorf("Unable to add flags for testing environment: %s", err.Error()) + return + } + pflag.Parse() + err = env.Initialize() + if err != nil { + t.Errorf("Unable to load testing environment: %s", err.Error()) + return + } + + s := reflect.ValueOf(env.Services) + sType := s.Type() + + for i := 0; i < s.NumField(); i++ { + field := s.Field(i) + fieldType := sType.Field(i) + + // Skip unexported fields (lowercase first letter) + if !fieldType.IsExported() { + continue + } + + // Only check fields that are function types (service locators) + if field.Kind() == reflect.Func && field.IsNil() { + t.Errorf("Service locator %s is nil", fieldType.Name) + } + } +} diff --git a/cmd/hyperfleet-api/environments/registry/registry.go b/cmd/hyperfleet-api/environments/registry/registry.go new file mode 100755 index 0000000..9c64a1d --- /dev/null +++ b/cmd/hyperfleet-api/environments/registry/registry.go @@ -0,0 +1,42 @@ +package registry + +import ( + "sync" +) + +// ServiceLocatorFunc is a function that creates a service locator +type ServiceLocatorFunc func(env interface{}) interface{} + +// ServiceRegistry holds registered services +type ServiceRegistry struct { + mu sync.RWMutex + services map[string]ServiceLocatorFunc +} + +var globalRegistry = &ServiceRegistry{ + services: make(map[string]ServiceLocatorFunc), +} + +// RegisterService registers a service with the global registry +func RegisterService(name string, locatorFunc ServiceLocatorFunc) { + globalRegistry.mu.Lock() + defer globalRegistry.mu.Unlock() + globalRegistry.services[name] = locatorFunc +} + +// ServicesInterface defines the interface for the Services struct +type ServicesInterface interface { + SetService(name string, service interface{}) +} + +// LoadDiscoveredServices loads all registered services into the Services struct +func LoadDiscoveredServices(services ServicesInterface, env interface{}) { + globalRegistry.mu.RLock() + defer globalRegistry.mu.RUnlock() + + for name, locatorFunc := range globalRegistry.services { + // Call the locator function to create the service and store it in the registry + serviceLocator := locatorFunc(env) + services.SetService(name, serviceLocator) + } +} diff --git a/cmd/hyperfleet-api/environments/types.go b/cmd/hyperfleet-api/environments/types.go new file mode 100755 index 0000000..d1d0c93 --- /dev/null +++ b/cmd/hyperfleet-api/environments/types.go @@ -0,0 +1,83 @@ +package environments + +import ( + "sync" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/auth" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/client/ocm" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" +) + +const ( + UnitTestingEnv string = "unit_testing" + IntegrationTestingEnv string = "integration_testing" + DevelopmentEnv string = "development" + ProductionEnv string = "production" + + EnvironmentStringKey string = "OCM_ENV" + EnvironmentDefault = DevelopmentEnv +) + +type Env struct { + Name string + Services Services + Handlers Handlers + Clients Clients + Database Database + // most code relies on env.Config + Config *config.ApplicationConfig +} + +type ApplicationConfig struct { + ApplicationConfig *config.ApplicationConfig +} + +type Database struct { + SessionFactory db.SessionFactory +} + +type Handlers struct { + AuthMiddleware auth.JWTMiddleware +} + +type Services struct { + serviceRegistry map[string]interface{} + mutex sync.RWMutex +} + +func (s *Services) GetService(name string) interface{} { + s.mutex.RLock() + defer s.mutex.RUnlock() + if s.serviceRegistry == nil { + return nil + } + return s.serviceRegistry[name] +} + +func (s *Services) SetService(name string, service interface{}) { + s.mutex.Lock() + defer s.mutex.Unlock() + if s.serviceRegistry == nil { + s.serviceRegistry = make(map[string]interface{}) + } + s.serviceRegistry[name] = service +} + +type Clients struct { + OCM *ocm.Client +} + +type ConfigDefaults struct { + Server map[string]interface{} + Metrics map[string]interface{} + Database map[string]interface{} + OCM map[string]interface{} + Options map[string]interface{} +} + +var ( + environment *Env + once sync.Once + environments map[string]EnvironmentImpl +) diff --git a/cmd/hyperfleet-api/main.go b/cmd/hyperfleet-api/main.go new file mode 100755 index 0000000..19a42f9 --- /dev/null +++ b/cmd/hyperfleet-api/main.go @@ -0,0 +1,52 @@ +package main + +import ( + "flag" + + "github.com/golang/glog" + "github.com/spf13/cobra" + + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/migrate" + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/servecmd" + + // Import plugins to trigger their init() functions + // _ "github.com/openshift-hyperfleet/hyperfleet-api/plugins/events" // REMOVED: Events plugin no longer exists + _ "github.com/openshift-hyperfleet/hyperfleet-api/plugins/generic" + _ "github.com/openshift-hyperfleet/hyperfleet-api/plugins/adapterStatus" + _ "github.com/openshift-hyperfleet/hyperfleet-api/plugins/clusters" + _ "github.com/openshift-hyperfleet/hyperfleet-api/plugins/nodePools" +) + +// nolint +// +//go:generate go-bindata -o ../../data/generated/openapi/openapi.go -pkg openapi -prefix ../../openapi/ ../../openapi + +func main() { + // This is needed to make `glog` believe that the flags have already been parsed, otherwise + // every log messages is prefixed by an error message stating the the flags haven't been + // parsed. + _ = flag.CommandLine.Parse([]string{}) + + //pflag.CommandLine.AddGoFlagSet(flag.CommandLine) + + // Always log to stderr by default + if err := flag.Set("logtostderr", "true"); err != nil { + glog.Infof("Unable to set logtostderr to true") + } + + rootCmd := &cobra.Command{ + Use: "hyperfleet", + Long: "hyperfleet serves as a template for new microservices", + } + + // All subcommands under root + migrateCmd := migrate.NewMigrateCommand() + serveCmd := servecmd.NewServeCommand() + + // Add subcommand(s) + rootCmd.AddCommand(migrateCmd, serveCmd) + + if err := rootCmd.Execute(); err != nil { + glog.Fatalf("error running command: %v", err) + } +} diff --git a/cmd/hyperfleet-api/migrate/cmd.go b/cmd/hyperfleet-api/migrate/cmd.go new file mode 100755 index 0000000..adcbb71 --- /dev/null +++ b/cmd/hyperfleet-api/migrate/cmd.go @@ -0,0 +1,41 @@ +package migrate + +import ( + "context" + "flag" + + "github.com/golang/glog" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/db_session" + "github.com/spf13/cobra" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" +) + +var dbConfig = config.NewDatabaseConfig() + +// NewMigrateCommand migrate sub-command handles running migrations +func NewMigrateCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "migrate", + Short: "Run hyperfleet service data migrations", + Long: "Run hyperfleet service data migrations", + Run: runMigrate, + } + + dbConfig.AddFlags(cmd.PersistentFlags()) + cmd.PersistentFlags().AddGoFlagSet(flag.CommandLine) + return cmd +} + +func runMigrate(_ *cobra.Command, _ []string) { + err := dbConfig.ReadFiles() + if err != nil { + glog.Fatal(err) + } + + connection := db_session.NewProdFactory(dbConfig) + if err := db.Migrate(connection.New(context.Background())); err != nil { + glog.Fatal(err) + } +} diff --git a/cmd/hyperfleet-api/servecmd/cmd.go b/cmd/hyperfleet-api/servecmd/cmd.go new file mode 100755 index 0000000..dce3fd3 --- /dev/null +++ b/cmd/hyperfleet-api/servecmd/cmd.go @@ -0,0 +1,52 @@ +package servecmd + +import ( + "github.com/golang/glog" + "github.com/spf13/cobra" + + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments" + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/server" +) + +func NewServeCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "serve", + Short: "Serve the hyperfleet", + Long: "Serve the hyperfleet.", + Run: runServe, + } + err := environments.Environment().AddFlags(cmd.PersistentFlags()) + if err != nil { + glog.Fatalf("Unable to add environment flags to serve command: %s", err.Error()) + } + + return cmd +} + +func runServe(cmd *cobra.Command, args []string) { + err := environments.Environment().Initialize() + if err != nil { + glog.Fatalf("Unable to initialize environment: %s", err.Error()) + } + + // Run the servers + go func() { + apiserver := server.NewAPIServer() + apiserver.Start() + }() + + go func() { + metricsServer := server.NewMetricsServer() + metricsServer.Start() + }() + + go func() { + healthcheckServer := server.NewHealthCheckServer() + healthcheckServer.Start() + }() + + // REMOVED: ControllersServer - Sentinel handles orchestration + // Controllers are no longer run inside the API service + + select {} +} diff --git a/cmd/hyperfleet-api/server/api_server.go b/cmd/hyperfleet-api/server/api_server.go new file mode 100755 index 0000000..dd84ea6 --- /dev/null +++ b/cmd/hyperfleet-api/server/api_server.go @@ -0,0 +1,154 @@ +package server + +import ( + "context" + "fmt" + "net" + "net/http" + "time" + + _ "github.com/auth0/go-jwt-middleware" + _ "github.com/golang-jwt/jwt/v4" + "github.com/golang/glog" + gorillahandlers "github.com/gorilla/handlers" + sdk "github.com/openshift-online/ocm-sdk-go" + "github.com/openshift-online/ocm-sdk-go/authentication" + + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments" +) + +type apiServer struct { + httpServer *http.Server +} + +var _ Server = &apiServer{} + +func env() *environments.Env { + return environments.Environment() +} + +func NewAPIServer() Server { + s := &apiServer{} + + mainRouter := s.routes() + + // referring to the router as type http.Handler allows us to add middleware via more handlers + var mainHandler http.Handler = mainRouter + + if env().Config.Server.EnableJWT { + // Create the logger for the authentication handler: + authnLogger, err := sdk.NewGlogLoggerBuilder(). + InfoV(glog.Level(1)). + DebugV(glog.Level(5)). + Build() + check(err, "Unable to create authentication logger") + + // Create the handler that verifies that tokens are valid: + mainHandler, err = authentication.NewHandler(). + Logger(authnLogger). + KeysFile(env().Config.Server.JwkCertFile). + KeysURL(env().Config.Server.JwkCertURL). + ACLFile(env().Config.Server.ACLFile). + Public("^/api/hyperfleet/?$"). + Public("^/api/hyperfleet/v1/?$"). + Public("^/api/hyperfleet/v1/openapi/?$"). + Public("^/api/hyperfleet/v1/openapi.html/?$"). + Public("^/api/hyperfleet/v1/errors(/.*)?$"). + Next(mainHandler). + Build() + check(err, "Unable to create authentication handler") + } + + // Configure CORS for Red Hat console and API access + mainHandler = gorillahandlers.CORS( + gorillahandlers.AllowedOrigins([]string{ + // OCM UI local development URLs + "https://qa.foo.redhat.com:1337", + "https://prod.foo.redhat.com:1337", + "https://ci.foo.redhat.com:1337", + // Production and staging console URLs + "https://console.redhat.com", + "https://qaprodauth.console.redhat.com", + "https://qa.console.redhat.com", + "https://ci.console.redhat.com", + "https://console.stage.redhat.com", + // API docs UI + "https://api.stage.openshift.com", + "https://api.openshift.com", + // Customer portal + "https://access.qa.redhat.com", + "https://access.stage.redhat.com", + "https://access.redhat.com", + }), + gorillahandlers.AllowedMethods([]string{ + http.MethodDelete, + http.MethodGet, + http.MethodPatch, + http.MethodPost, + }), + gorillahandlers.AllowedHeaders([]string{ + "Authorization", + "Content-Type", + }), + gorillahandlers.MaxAge(int((10 * time.Minute).Seconds())), + )(mainHandler) + + mainHandler = removeTrailingSlash(mainHandler) + + s.httpServer = &http.Server{ + Addr: env().Config.Server.BindAddress, + Handler: mainHandler, + } + + return s +} + +// Serve start the blocking call to Serve. +// Useful for breaking up ListenAndServer (Start) when you require the server to be listening before continuing +func (s apiServer) Serve(listener net.Listener) { + var err error + if env().Config.Server.EnableHTTPS { + // Check https cert and key path path + if env().Config.Server.HTTPSCertFile == "" || env().Config.Server.HTTPSKeyFile == "" { + check( + fmt.Errorf("unspecified required --https-cert-file, --https-key-file"), + "Can't start https server", + ) + } + + // Serve with TLS + glog.Infof("Serving with TLS at %s", env().Config.Server.BindAddress) + err = s.httpServer.ServeTLS(listener, env().Config.Server.HTTPSCertFile, env().Config.Server.HTTPSKeyFile) + } else { + glog.Infof("Serving without TLS at %s", env().Config.Server.BindAddress) + err = s.httpServer.Serve(listener) + } + + // Web server terminated. + check(err, "Web server terminated with errors") + glog.Info("Web server terminated") +} + +// Listen only start the listener, not the server. +// Useful for breaking up ListenAndServer (Start) when you require the server to be listening before continuing +func (s apiServer) Listen() (listener net.Listener, err error) { + return net.Listen("tcp", env().Config.Server.BindAddress) +} + +// Start listening on the configured port and start the server. This is a convenience wrapper for Listen() and Serve(listener Listener) +func (s apiServer) Start() { + listener, err := s.Listen() + if err != nil { + glog.Fatalf("Unable to start API server: %s", err) + } + s.Serve(listener) + + // after the server exits but before the application terminates + // we need to explicitly close Go's sql connection pool. + // this needs to be called *exactly* once during an app's lifetime. + env().Database.SessionFactory.Close() +} + +func (s apiServer) Stop() error { + return s.httpServer.Shutdown(context.Background()) +} diff --git a/cmd/hyperfleet-api/server/healthcheck_server.go b/cmd/hyperfleet-api/server/healthcheck_server.go new file mode 100755 index 0000000..f677234 --- /dev/null +++ b/cmd/hyperfleet-api/server/healthcheck_server.go @@ -0,0 +1,82 @@ +package server + +import ( + "context" + "fmt" + "net" + "net/http" + + health "github.com/docker/go-healthcheck" + "github.com/golang/glog" + "github.com/gorilla/mux" +) + +var ( + updater = health.NewStatusUpdater() +) + +var _ Server = &healthCheckServer{} + +type healthCheckServer struct { + httpServer *http.Server +} + +func NewHealthCheckServer() *healthCheckServer { + router := mux.NewRouter() + health.DefaultRegistry = health.NewRegistry() + health.Register("maintenance_status", updater) + router.HandleFunc("/healthcheck", health.StatusHandler).Methods(http.MethodGet) + router.HandleFunc("/healthcheck/down", downHandler).Methods(http.MethodPost) + router.HandleFunc("/healthcheck/up", upHandler).Methods(http.MethodPost) + + srv := &http.Server{ + Handler: router, + Addr: env().Config.HealthCheck.BindAddress, + } + + return &healthCheckServer{ + httpServer: srv, + } +} + +func (s healthCheckServer) Start() { + var err error + if env().Config.HealthCheck.EnableHTTPS { + if env().Config.Server.HTTPSCertFile == "" || env().Config.Server.HTTPSKeyFile == "" { + check( + fmt.Errorf("unspecified required --https-cert-file, --https-key-file"), + "Can't start https server", + ) + } + + // Serve with TLS + glog.Infof("Serving HealthCheck with TLS at %s", env().Config.HealthCheck.BindAddress) + err = s.httpServer.ListenAndServeTLS(env().Config.Server.HTTPSCertFile, env().Config.Server.HTTPSKeyFile) + } else { + glog.Infof("Serving HealthCheck without TLS at %s", env().Config.HealthCheck.BindAddress) + err = s.httpServer.ListenAndServe() + } + check(err, "HealthCheck server terminated with errors") + glog.Infof("HealthCheck server terminated") +} + +func (s healthCheckServer) Stop() error { + return s.httpServer.Shutdown(context.Background()) +} + +// Listen Unimplemented +func (s healthCheckServer) Listen() (listener net.Listener, err error) { + return nil, nil +} + +// Serve Unimplemented +func (s healthCheckServer) Serve(listener net.Listener) { +} + +func upHandler(w http.ResponseWriter, r *http.Request) { + updater.Update(nil) +} + +func downHandler(w http.ResponseWriter, r *http.Request) { + updater.Update(fmt.Errorf("maintenance mode")) +} diff --git a/cmd/hyperfleet-api/server/logging/formatter.go b/cmd/hyperfleet-api/server/logging/formatter.go new file mode 100755 index 0000000..547bcef --- /dev/null +++ b/cmd/hyperfleet-api/server/logging/formatter.go @@ -0,0 +1,8 @@ +package logging + +import "net/http" + +type LogFormatter interface { + FormatRequestLog(request *http.Request) (string, error) + FormatResponseLog(responseInfo *ResponseInfo) (string, error) +} diff --git a/cmd/hyperfleet-api/server/logging/formatter_json.go b/cmd/hyperfleet-api/server/logging/formatter_json.go new file mode 100755 index 0000000..9d0062c --- /dev/null +++ b/cmd/hyperfleet-api/server/logging/formatter_json.go @@ -0,0 +1,62 @@ +package logging + +import ( + "encoding/json" + "io" + "net/http" + + "github.com/golang/glog" +) + +func NewJSONLogFormatter() *jsonLogFormatter { + return &jsonLogFormatter{} +} + +type jsonLogFormatter struct{} + +var _ LogFormatter = &jsonLogFormatter{} + +func (f *jsonLogFormatter) FormatRequestLog(r *http.Request) (string, error) { + jsonlog := jsonRequestLog{ + Method: r.Method, + RequestURI: r.RequestURI, + RemoteAddr: r.RemoteAddr, + } + if glog.V(10) { + jsonlog.Header = r.Header + jsonlog.Body = r.Body + } + + log, err := json.Marshal(jsonlog) + if err != nil { + return "", err + } + return string(log[:]), nil +} + +func (f *jsonLogFormatter) FormatResponseLog(info *ResponseInfo) (string, error) { + jsonlog := jsonResponseLog{Header: nil, Status: info.Status, Elapsed: info.Elapsed} + if glog.V(10) { + jsonlog.Body = string(info.Body[:]) + } + log, err := json.Marshal(jsonlog) + if err != nil { + return "", err + } + return string(log[:]), nil +} + +type jsonRequestLog struct { + Method string `json:"request_method"` + RequestURI string `json:"request_url"` + Header http.Header `json:"request_header,omitempty"` + Body io.ReadCloser `json:"request_body,omitempty"` + RemoteAddr string `json:"request_remote_ip,omitempty"` +} + +type jsonResponseLog struct { + Header http.Header `json:"response_header,omitempty"` + Status int `json:"response_status,omitempty"` + Body string `json:"response_body,omitempty"` + Elapsed string `json:"elapsed,omitempty"` +} diff --git a/cmd/hyperfleet-api/server/logging/logging.go b/cmd/hyperfleet-api/server/logging/logging.go new file mode 100755 index 0000000..1c889df --- /dev/null +++ b/cmd/hyperfleet-api/server/logging/logging.go @@ -0,0 +1,3 @@ +package logging + +const Threshold int32 = 1 diff --git a/cmd/hyperfleet-api/server/logging/request_logging_middleware.go b/cmd/hyperfleet-api/server/logging/request_logging_middleware.go new file mode 100755 index 0000000..44a0f8c --- /dev/null +++ b/cmd/hyperfleet-api/server/logging/request_logging_middleware.go @@ -0,0 +1,34 @@ +package logging + +import ( + "net/http" + "strings" + "time" +) + +func RequestLoggingMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(writer http.ResponseWriter, request *http.Request) { + path := strings.TrimSuffix(request.URL.Path, "/") + doLog := true + + // these contribute greatly to log spam but are not useful or meaningful. + // consider a list/map of URLs should this grow in the future. + if path == "/api/hyperfleet" { + doLog = false + } + + loggingWriter := NewLoggingWriter(writer, request, NewJSONLogFormatter()) + + if doLog { + loggingWriter.log(loggingWriter.prepareRequestLog()) + } + + before := time.Now() + handler.ServeHTTP(loggingWriter, request) + elapsed := time.Since(before).String() + + if doLog { + loggingWriter.log(loggingWriter.prepareResponseLog(elapsed)) + } + }) +} diff --git a/cmd/hyperfleet-api/server/logging/responseinfo.go b/cmd/hyperfleet-api/server/logging/responseinfo.go new file mode 100755 index 0000000..089a120 --- /dev/null +++ b/cmd/hyperfleet-api/server/logging/responseinfo.go @@ -0,0 +1,10 @@ +package logging + +import "net/http" + +type ResponseInfo struct { + Header http.Header `json:"response_header,omitempty"` + Body []byte `json:"response_body,omitempty"` + Status int `json:"response_status,omitempty"` + Elapsed string `json:"elapsed,omitempty"` +} diff --git a/cmd/hyperfleet-api/server/logging/writer.go b/cmd/hyperfleet-api/server/logging/writer.go new file mode 100755 index 0000000..122aaf6 --- /dev/null +++ b/cmd/hyperfleet-api/server/logging/writer.go @@ -0,0 +1,54 @@ +package logging + +import ( + "net/http" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" +) + +func NewLoggingWriter(w http.ResponseWriter, r *http.Request, f LogFormatter) *loggingWriter { + return &loggingWriter{ResponseWriter: w, request: r, formatter: f} +} + +type loggingWriter struct { + http.ResponseWriter + request *http.Request + formatter LogFormatter + responseStatus int + responseBody []byte +} + +func (writer *loggingWriter) Write(body []byte) (int, error) { + writer.responseBody = body + return writer.ResponseWriter.Write(body) +} + +func (writer *loggingWriter) WriteHeader(status int) { + writer.responseStatus = status + writer.ResponseWriter.WriteHeader(status) +} + +func (writer *loggingWriter) log(logMsg string, err error) { + log := logger.NewOCMLogger(writer.request.Context()) + switch err { + case nil: + log.V(Threshold).Infof(logMsg) + default: + log.Extra("error", err.Error()).Error("Unable to format request/response for log.") + } +} + +func (writer *loggingWriter) prepareRequestLog() (string, error) { + return writer.formatter.FormatRequestLog(writer.request) +} + +func (writer *loggingWriter) prepareResponseLog(elapsed string) (string, error) { + info := &ResponseInfo{ + Header: writer.ResponseWriter.Header(), + Body: writer.responseBody, + Status: writer.responseStatus, + Elapsed: elapsed, + } + + return writer.formatter.FormatResponseLog(info) +} diff --git a/cmd/hyperfleet-api/server/metrics_middleware.go b/cmd/hyperfleet-api/server/metrics_middleware.go new file mode 100755 index 0000000..448593f --- /dev/null +++ b/cmd/hyperfleet-api/server/metrics_middleware.go @@ -0,0 +1,197 @@ +/* +Copyright (c) 2019 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains an HTTP middleware that generates metrics about API requests: +// +// api_inbound_request_count - Number of requests served. +// api_inbound_request_duration_sum - Total time to process requests, in seconds. +// api_inbound_request_duration_count - Total number of requests measured. +// api_inbound_request_duration_bucket - Number of requests that processed in less than a given time. +// +// The duration buckets metrics contain an `le` label that indicates the upper. For example if the +// `le` label is `1` then the value will be the number of requests that were processed in less than +// one second. +// +// All the metrics have the following labels: +// +// method - Name of the HTTP method, for example GET or POST. +// path - Request path, for example /api/clusters_mgmt/v1/clusters. +// code - HTTP response code, for example 200 or 500. +// +// To calculate the average request duration during the last 10 minutes, for example, use a +// Prometheus expression like this: +// +// rate(api_inbound_request_duration_sum[10m]) / rate(api_inbound_request_duration_count[10m]) +// +// In order to reduce the cardinality of the metrics the path label is modified to remove the +// identifiers of the objects. For example, if the original path is .../clusters/123 then it will +// be replaced by .../clusters/-, and the values will be accumulated. The line returned by the +// metrics server will be like this: +// +// api_inbound_request_count{code="200",method="GET",path="/api/clusters_mgmt/v1/clusters/-"} 56 +// +// The meaning of that is that there were a total of 56 requests to get specific clusters, +// independently of the specific identifier of the cluster. + +package server + +import ( + "net/http" + "regexp" + "strconv" + "time" + + "github.com/gorilla/mux" + "github.com/prometheus/client_golang/prometheus" +) + +// MetricsMiddleware creates a new handler that collects metrics for the requests processed by the +// given handler. +func MetricsMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Wrap the origial response writer with one that will allow as to get the response + // status code: + wrapper := &metricsResponseWrapper{ + wrapped: w, + } + + // Call the next handler measuring the time that it takes: + before := time.Now() + handler.ServeHTTP(wrapper, r) + elapsed := time.Since(before) + + // In order to reduce the cardinality of the metrics we need to remove from the + // request path all the object identifiers: + path := "/" + PathVarSub + route := mux.CurrentRoute(r) + if route != nil { + template, err := route.GetPathTemplate() + if err == nil { + path = metricsPathVarRE.ReplaceAllString(template, PathVarSub) + } + } + + // Create the set of labels that we will add to all the requests: + labels := prometheus.Labels{ + metricsMethodLabel: r.Method, + metricsPathLabel: path, + metricsCodeLabel: strconv.Itoa(wrapper.code), + } + + // Update the metric containing the number of requests: + requestCountMetric.With(labels).Inc() + + // Update the metrics containing the response duration: + requestDurationMetric.With(labels).Observe(elapsed.Seconds()) + }) +} + +// ResetMetricCollectors resets all prometheus collectors +func ResetMetricCollectors() { + requestCountMetric.Reset() + requestDurationMetric.Reset() +} + +// Regular expression used to remove variables from route path templates: +var metricsPathVarRE = regexp.MustCompile(`{[^}]*}`) + +// PathVarSub replaces path variables to a same character +var PathVarSub = "-" + +// Subsystem used to define the metrics: +const metricsSubsystem = "api_inbound" + +// Names of the labels added to metrics: +const ( + metricsMethodLabel = "method" + metricsPathLabel = "path" + metricsCodeLabel = "code" +) + +// MetricsLabels - Array of labels added to metrics: +var MetricsLabels = []string{ + metricsMethodLabel, + metricsPathLabel, + metricsCodeLabel, +} + +// Names of the metrics: +const ( + requestCount = "request_count" + requestDuration = "request_duration" +) + +// MetricsNames - Array of Names of the metrics: +var MetricsNames = []string{ + requestCount, + requestDuration, +} + +// Description of the requests count metric: +var requestCountMetric = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: metricsSubsystem, + Name: requestCount, + Help: "Number of requests served.", + }, + MetricsLabels, +) + +// Description of the request duration metric: +var requestDurationMetric = prometheus.NewHistogramVec( + prometheus.HistogramOpts{ + Subsystem: metricsSubsystem, + Name: requestDuration, + Help: "Request duration in seconds.", + Buckets: []float64{ + 0.1, + 1.0, + 10.0, + 30.0, + }, + }, + MetricsLabels, +) + +// metricsResponseWrapper is an extension of the HTTP response writer that remembers the status code, +// so that we can add to metrics after the response is sent to the client. +type metricsResponseWrapper struct { + wrapped http.ResponseWriter + code int +} + +func (w *metricsResponseWrapper) Header() http.Header { + return w.wrapped.Header() +} + +func (w *metricsResponseWrapper) Write(b []byte) (n int, err error) { + if w.code == 0 { + w.code = http.StatusOK + } + n, err = w.wrapped.Write(b) + return +} + +func (w *metricsResponseWrapper) WriteHeader(code int) { + w.code = code + w.wrapped.WriteHeader(code) +} + +func init() { + // Register the metrics: + prometheus.MustRegister(requestCountMetric) + prometheus.MustRegister(requestDurationMetric) +} diff --git a/cmd/hyperfleet-api/server/metrics_server.go b/cmd/hyperfleet-api/server/metrics_server.go new file mode 100755 index 0000000..6fa14b0 --- /dev/null +++ b/cmd/hyperfleet-api/server/metrics_server.go @@ -0,0 +1,71 @@ +package server + +import ( + "context" + "fmt" + "net" + "net/http" + + "github.com/gorilla/mux" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/handlers" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" +) + +func NewMetricsServer() Server { + mainRouter := mux.NewRouter() + mainRouter.NotFoundHandler = http.HandlerFunc(api.SendNotFound) + + // metrics endpoint + prometheusMetricsHandler := handlers.NewPrometheusMetricsHandler() + mainRouter.Handle("/metrics", prometheusMetricsHandler.Handler()) + + var mainHandler http.Handler = mainRouter + + s := &metricsServer{} + s.httpServer = &http.Server{ + Addr: env().Config.Metrics.BindAddress, + Handler: mainHandler, + } + return s +} + +type metricsServer struct { + httpServer *http.Server +} + +var _ Server = &metricsServer{} + +func (s metricsServer) Listen() (listener net.Listener, err error) { + return nil, nil +} + +func (s metricsServer) Serve(listener net.Listener) { +} + +func (s metricsServer) Start() { + log := logger.NewOCMLogger(context.Background()) + var err error + if env().Config.Metrics.EnableHTTPS { + if env().Config.Server.HTTPSCertFile == "" || env().Config.Server.HTTPSKeyFile == "" { + check( + fmt.Errorf("unspecified required --https-cert-file, --https-key-file"), + "Can't start https server", + ) + } + + // Serve with TLS + log.Infof("Serving Metrics with TLS at %s", env().Config.Server.BindAddress) + err = s.httpServer.ListenAndServeTLS(env().Config.Server.HTTPSCertFile, env().Config.Server.HTTPSKeyFile) + } else { + log.Infof("Serving Metrics without TLS at %s", env().Config.Metrics.BindAddress) + err = s.httpServer.ListenAndServe() + } + check(err, "Metrics server terminated with errors") + log.Infof("Metrics server terminated") +} + +func (s metricsServer) Stop() error { + return s.httpServer.Shutdown(context.Background()) +} diff --git a/cmd/hyperfleet-api/server/routes.go b/cmd/hyperfleet-api/server/routes.go new file mode 100755 index 0000000..0a47969 --- /dev/null +++ b/cmd/hyperfleet-api/server/routes.go @@ -0,0 +1,104 @@ +package server + +import ( + "fmt" + "net/http" + + gorillahandlers "github.com/gorilla/handlers" + "github.com/gorilla/mux" + + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/server/logging" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/auth" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/handlers" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" +) + +type ServicesInterface interface { + GetService(name string) interface{} +} + +type RouteRegistrationFunc func(apiV1Router *mux.Router, services ServicesInterface, authMiddleware auth.JWTMiddleware, authzMiddleware auth.AuthorizationMiddleware) + +var routeRegistry = make(map[string]RouteRegistrationFunc) + +func RegisterRoutes(name string, registrationFunc RouteRegistrationFunc) { + routeRegistry[name] = registrationFunc +} + +func LoadDiscoveredRoutes(apiV1Router *mux.Router, services ServicesInterface, authMiddleware auth.JWTMiddleware, authzMiddleware auth.AuthorizationMiddleware) { + for name, registrationFunc := range routeRegistry { + registrationFunc(apiV1Router, services, authMiddleware, authzMiddleware) + _ = name // prevent unused variable warning + } +} + +func (s *apiServer) routes() *mux.Router { + services := &env().Services + + metadataHandler := handlers.NewMetadataHandler() + + var authMiddleware auth.JWTMiddleware + authMiddleware = &auth.MiddlewareMock{} + if env().Config.Server.EnableJWT { + var err error + authMiddleware, err = auth.NewAuthMiddleware() + check(err, "Unable to create auth middleware") + } + if authMiddleware == nil { + check(fmt.Errorf("auth middleware is nil"), "Unable to create auth middleware: missing middleware") + } + + authzMiddleware := auth.NewAuthzMiddlewareMock() + if env().Config.Server.EnableAuthz { + // TODO: authzMiddleware, err = auth.NewAuthzMiddleware() + // check(err, "Unable to create authz middleware") + } + + // mainRouter is top level "/" + mainRouter := mux.NewRouter() + mainRouter.NotFoundHandler = http.HandlerFunc(api.SendNotFound) + + // Operation ID middleware sets a relatively unique operation ID in the context of each request for debugging purposes + mainRouter.Use(logger.OperationIDMiddleware) + + // Request logging middleware logs pertinent information about the request and response + mainRouter.Use(logging.RequestLoggingMiddleware) + + // /api/hyperfleet + apiRouter := mainRouter.PathPrefix("/api/hyperfleet").Subrouter() + apiRouter.HandleFunc("", metadataHandler.Get).Methods(http.MethodGet) + + // /api/hyperfleet/v1 + apiV1Router := apiRouter.PathPrefix("/v1").Subrouter() + + // /api/hyperfleet/v1/openapi + openapiHandler, err := handlers.NewOpenAPIHandler() + check(err, "Unable to create OpenAPI handler") + apiV1Router.HandleFunc("/openapi.html", openapiHandler.GetOpenAPIUI).Methods(http.MethodGet) + apiV1Router.HandleFunc("/openapi", openapiHandler.GetOpenAPI).Methods(http.MethodGet) + + // /api/hyperfleet/v1/compatibility + compatibilityHandler := handlers.NewCompatibilityHandler() + apiV1Router.HandleFunc("/compatibility", compatibilityHandler.Get).Methods(http.MethodGet) + + registerApiMiddleware(apiV1Router) + + // Auto-discovered routes (no manual editing needed) + LoadDiscoveredRoutes(apiV1Router, services, authMiddleware, authzMiddleware) + + return mainRouter +} + +func registerApiMiddleware(router *mux.Router) { + router.Use(MetricsMiddleware) + + router.Use( + func(next http.Handler) http.Handler { + return db.TransactionMiddleware(next, env().Database.SessionFactory) + }, + ) + + router.Use(gorillahandlers.CompressHandler) +} diff --git a/cmd/hyperfleet-api/server/server.go b/cmd/hyperfleet-api/server/server.go new file mode 100755 index 0000000..5d197f7 --- /dev/null +++ b/cmd/hyperfleet-api/server/server.go @@ -0,0 +1,32 @@ +package server + +import ( + "net" + "net/http" + "os" + "strings" + + "github.com/golang/glog" +) + +type Server interface { + Start() + Stop() error + Listen() (net.Listener, error) + Serve(net.Listener) +} + +func removeTrailingSlash(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + r.URL.Path = strings.TrimSuffix(r.URL.Path, "/") + next.ServeHTTP(w, r) + }) +} + +// Exit on error +func check(err error, msg string) { + if err != nil && err != http.ErrServerClosed { + glog.Errorf("%s: %s", msg, err) + os.Exit(1) + } +} diff --git a/data/generated/openapi/openapi.go b/data/generated/openapi/openapi.go new file mode 100755 index 0000000..b20c9c2 --- /dev/null +++ b/data/generated/openapi/openapi.go @@ -0,0 +1,244 @@ +// Code generated for package openapi by go-bindata DO NOT EDIT. (@generated) +// sources: +// ../../openapi/openapi.yaml +package openapi + +import ( + "bytes" + "compress/gzip" + "fmt" + "io" + "io/ioutil" + "os" + "path/filepath" + "strings" + "time" +) + +func bindataRead(data []byte, name string) ([]byte, error) { + gz, err := gzip.NewReader(bytes.NewBuffer(data)) + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + + var buf bytes.Buffer + _, err = io.Copy(&buf, gz) + clErr := gz.Close() + + if err != nil { + return nil, fmt.Errorf("Read %q: %v", name, err) + } + if clErr != nil { + return nil, err + } + + return buf.Bytes(), nil +} + +type asset struct { + bytes []byte + info os.FileInfo +} + +type bindataFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +// Name return file name +func (fi bindataFileInfo) Name() string { + return fi.name +} + +// Size return file size +func (fi bindataFileInfo) Size() int64 { + return fi.size +} + +// Mode return file mode +func (fi bindataFileInfo) Mode() os.FileMode { + return fi.mode +} + +// Mode return file modify time +func (fi bindataFileInfo) ModTime() time.Time { + return fi.modTime +} + +// IsDir return file whether a directory +func (fi bindataFileInfo) IsDir() bool { + return fi.mode&os.ModeDir != 0 +} + +// Sys return file is sys mode +func (fi bindataFileInfo) Sys() interface{} { + return nil +} + +var _openapiYaml = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\xec\x5d\x7b\x73\xdb\xb6\x96\xff\xdf\x9f\xe2\x4c\xf7\xce\x38\xe9\x46\x92\x9d\x74\x76\xb6\x9a\xe9\xcc\xe6\xb1\xbd\xf5\x6e\xea\x64\x63\xb7\x9d\xd9\x3b\x77\x63\x88\x3c\x32\x91\x80\x00\x0b\x80\x72\x74\xbb\xfd\xee\x77\xf0\x22\x41\x8a\x94\x28\x59\x79\x38\x96\xa7\x7f\x54\x20\x1e\x07\x07\x38\xbf\xf3\xc3\xc1\x23\xa2\x40\x4e\x0a\x3a\x85\x27\xe3\x93\xf1\xc9\x11\xe5\x73\x31\x3d\x02\xd0\x54\x33\x9c\xc2\x4f\xcb\x02\xe5\x8f\x0c\x51\xc3\xd3\xd7\x67\x47\x00\x0b\x94\x8a\x0a\x3e\x85\x53\x9b\x1d\x20\x11\x5c\x93\x44\x9b\x32\x00\x9c\xe4\xcd\x42\x97\x48\xf2\x23\x80\x14\x55\x22\x69\xa1\x6d\xc9\xff\x1f\xd9\xbc\xcd\xaa\xa1\x90\x62\x41\x53\x54\xa0\x68\x5e\x30\x84\xe7\x6f\x7e\x79\x01\xa2\x40\x49\x4c\x29\x05\x73\x21\x21\x27\x9c\x5c\x53\x7e\x0d\x09\x2b\x95\x46\x09\x12\x95\x28\x65\x82\x0a\x08\x4f\x41\x67\x48\x25\x28\x4d\x74\xa9\x20\xa3\x4a\x0b\xb9\x1c\x1f\xd9\xc6\xbe\xfd\xf6\xa9\x4c\x32\xaa\x31\xd1\xa5\xc4\x6f\xbf\x9d\xc2\x45\xdc\x0c\x67\xcb\x47\xc0\x05\xcc\x4a\x45\x39\x2a\x05\x4c\x5c\xd3\xc4\x26\xe1\x02\xb9\x86\x44\xa2\x95\x63\x6c\x6b\xbb\x40\xae\x29\x47\xe6\xe5\x13\x12\x32\xc2\x53\x66\xc4\x60\x0c\x84\x4c\x32\x54\xda\x09\xee\x6a\x72\xc5\x9e\xa6\xa4\xd0\x28\x95\xcf\x6d\xe4\x05\x55\x60\x42\xe7\x34\x51\x20\xe6\x75\xff\x4c\xea\x91\x26\xd7\x6a\x0a\x7f\xfb\xfb\x51\x41\x74\xa6\x8c\x82\x27\xa4\xa0\x93\xcc\xe8\x6d\x6e\xf4\x36\x59\x9c\x4e\xbc\x26\x94\xd3\xff\x35\xfa\x81\x80\x5a\x75\x67\xe9\xd4\xa4\x3f\xf7\x19\xfd\x67\x55\xe6\x39\x91\xcb\x29\xbc\xa4\x4a\x07\x7d\x86\x8f\x05\x91\x24\xc7\xba\x5a\xf3\x37\x82\xbf\x48\x9c\x4f\xe1\xf8\x5f\x26\x89\xc8\x0b\xc1\x91\x6b\x35\xa9\x73\x4e\xfe\xa7\x44\xb9\x7c\x6d\x7e\xab\x71\x41\xae\xf1\x78\xf7\xa2\x17\xf4\x1f\xbb\x16\x17\x32\x45\xf9\x6c\x79\x9b\xd2\x5b\x94\xbd\x40\x22\x93\xcc\x15\x0e\xc5\x24\xaa\x42\x70\x85\x91\xee\x8e\x1f\x9f\x9c\x1c\xd7\x3f\x5b\xf6\x70\x99\x21\x48\xfc\xbd\x44\xa5\x21\x23\x0a\x54\x99\x24\x88\x29\xa6\xe3\xa8\x84\x31\x33\xe4\x3a\xae\x04\x80\x14\x05\xa3\x89\x1d\xe5\xc9\x3b\x25\x78\xf3\x2b\x80\x4a\x32\xcc\x49\x3b\x15\x3a\x7b\xe5\xf2\xaa\x89\x9f\x27\x66\x5a\xd4\x8a\x48\x71\x4e\x4a\xa6\x7b\xbb\xf0\x94\x43\xc9\xf1\x43\x81\x89\xc6\x14\x50\x4a\x21\x2b\x3d\x7c\x96\x5e\xfc\xa7\x11\xc1\xc9\x5f\x08\xd5\x6d\x14\xe6\x83\xef\x6d\xdb\x28\x9e\x1b\x73\xc7\x60\x16\x47\x1d\x3d\xf6\x20\x66\xfe\x7c\x66\x02\x1c\x6f\x56\x90\xc9\xe3\x0f\x58\x0c\x3a\x17\xda\x62\x8f\x19\xf1\x2b\x87\x53\x57\x20\x66\xef\x30\xd1\x40\xb9\xc5\x83\xa0\x35\xa0\x0a\x24\x92\x74\x64\xb0\xc9\xa2\x9b\xe9\x65\x69\xd4\x3b\x5b\x3a\xe4\x40\xb9\xa0\x49\xa4\xde\x33\x6d\x0a\x9d\xbf\xba\x34\xf6\xab\x0d\x9e\xe8\x68\x66\xcd\x44\xba\x1c\xc3\x19\xa7\x9a\x12\x66\xe0\xce\xb5\x3f\x2e\x32\xa2\x10\x6e\x28\x63\x30\x43\xf8\xe6\x5c\xe8\x37\x48\xd2\xe5\x37\xa6\xcd\xaa\x6a\x9f\x97\x04\x00\x0b\xd9\x31\x2f\xf4\x12\x4a\xae\x29\x83\xea\xe3\xeb\x57\x17\x97\x0d\x2c\x1e\xaf\xc2\x8a\x81\x35\x97\xd8\x6d\x2e\xa7\x3b\x98\x8b\xd5\x92\x1b\x85\xa0\x7d\x9b\x61\x86\xc8\x1d\x80\x9b\x3c\x0a\x88\xf9\x5c\x32\xfd\x39\xcd\xeb\xeb\x30\x2d\x08\x03\xf1\x4c\xa4\xcb\xba\x16\x93\x48\x25\xa6\x53\xd0\xb2\xc4\xa3\x35\x12\xae\x97\xaf\x5b\xba\x01\xda\x75\x16\xf9\xc6\xc9\x76\xbc\xde\x71\x4e\xfe\xf0\xff\xf7\x96\xa6\x7f\x0e\xf5\xa2\xcf\x96\x67\x69\x1b\x33\xfe\x8a\x95\x1f\x35\x26\x7a\xf6\xe2\x36\xce\xb4\xcb\xb1\x98\x62\x8e\x60\xd5\x12\x47\xba\xa1\x7c\x0a\x86\x29\x44\x49\x3d\x03\xd1\xad\x59\xbd\x2c\x70\x0a\x4a\x4b\xca\xaf\xbf\x2a\x57\xf6\x35\xd8\xda\xd0\x09\x3c\xe1\x22\xc5\x42\x08\xb6\x99\x10\x9e\x8b\x14\x5f\x9b\x9c\xcf\x96\x5e\x51\xab\x73\xda\x92\x43\xc3\x69\xab\x6a\x2d\x0f\x5f\xe3\x17\xdf\xa0\x2e\x25\x57\xd6\xf5\x30\x53\x5a\xcc\x3b\x2a\x20\xad\x2a\xba\x6d\x64\x4f\x93\xbd\x21\x9f\xef\x6a\x6d\x9e\x7d\x43\xd3\x61\x0e\x07\x12\x7c\x5f\x48\x70\xb0\x8d\x7b\xc0\x82\x1d\x31\x0a\x1d\xee\x21\xc2\xc1\x7a\xbb\x2c\xbe\xa2\xbf\xa1\x8e\x3b\x6c\xe1\xf7\x8c\x8c\x86\x11\x0b\x7c\xc9\x75\xfe\xeb\x98\xf0\x5f\x1c\x37\x6d\x2b\x7b\x6b\x72\x5a\xfb\xf6\xc9\x1f\xe1\x7f\x07\x91\xd6\xd0\x72\x1f\x6b\x0d\x95\x35\x68\x6b\xa7\x57\x0f\xd1\xaa\x36\x20\xdc\x0d\xeb\xae\x65\x8a\xd4\xb7\x0f\xa1\x2a\xe4\xdb\x1f\xe6\xdc\x39\x57\xf9\x35\xa0\xc6\x6d\x2d\x71\xe2\xe2\x1d\xb8\x05\xf9\xbe\xf0\x25\x7a\x99\xb7\x0f\xab\x40\xa8\xda\x7a\xd7\xaa\xfd\x75\xc6\x9a\x94\x52\x22\xd7\x21\x20\xee\xe3\x4c\xa6\x38\x92\x24\xab\x2a\xd6\x19\x71\xf3\x48\x62\x21\xa4\x19\x12\x57\xe0\xde\x9a\xf6\x61\x35\x70\x58\x0d\x34\xf0\xc1\xef\xdb\x38\x53\xbd\x07\x4b\x02\xf3\x21\x00\x54\x1f\x3e\x05\xd6\xdf\x40\xa7\x2e\x34\x8a\x02\xe5\x5e\x8f\x9e\xfe\x2a\xa0\x5a\x01\x75\x01\xe9\x0e\x90\xd2\x19\x55\x01\x61\x6a\x55\x05\x70\xfb\xee\xe4\x7b\xa0\xf3\xaa\x7d\xc2\x24\x92\x74\x69\x27\x0b\xd9\x50\x19\x1c\x45\xb5\x85\x68\x3b\x4f\x58\x99\xa2\x8b\x5b\xf8\xe2\xd4\x91\xf8\x4c\xe2\xdc\xd6\x31\x2f\x75\x29\x31\xda\x93\x1c\xaf\xf4\x4c\x65\xa2\x64\x06\x40\x85\x44\x1f\x7d\x37\xe2\x62\x0a\x34\x9d\xd8\x8a\xb4\x80\xb2\x48\x8d\xea\x4c\xef\x7d\x4b\x8c\x44\x7d\x3c\x00\x6e\x4f\x93\xf7\x6c\x71\xd6\x40\x9d\x1a\x71\x8e\xbf\xdb\x04\x9c\x0a\xe5\xc2\x58\x99\x9d\x8a\x5c\x68\x28\x79\x8a\x52\x69\xbf\x45\x5e\xa9\x24\x2d\xd1\x4c\x47\xca\x17\x84\xd1\x14\xd4\x92\x6b\xf2\x61\x1c\x37\xf4\xdd\xb0\x86\x08\x37\xad\xcc\x69\xb3\x7e\x4c\xa3\x4d\xb0\xa8\xd2\xef\x87\x0d\x58\x22\xf8\x9c\xd1\x44\x2b\xb8\xa1\x3a\xb3\x15\xc7\x84\x06\xc3\xfe\x96\x13\x62\xfc\x65\xae\xf9\x1a\x63\xb8\xfb\xc2\x6f\x30\xad\xf4\x16\xbd\x1b\xa9\x1c\x10\xd5\xbd\x6f\x94\xf2\xc0\xef\xee\x39\xbf\xdb\x3b\x08\x0e\x39\x8d\x70\xe0\x5c\x07\xce\x75\x20\x40\x07\x02\x74\x2f\x09\x90\xc8\x0b\xa2\xe9\x8c\x32\xaa\x97\x9b\xf9\x4e\x9c\x7b\x1d\x71\xe9\xdd\x8e\x5e\x0b\x00\x07\xf7\xff\xa5\xb9\x7f\x8d\x1f\xf4\xa4\x60\x84\x0e\x46\x98\x0e\x1c\x55\x98\x94\xb2\x9a\x5e\xae\xdb\xcf\x90\x48\x94\x4f\x4b\x9d\xf9\xb3\x69\x1d\x73\x73\x87\xb3\x15\xbd\x0c\x7c\x7f\x07\x2a\x0e\x33\xf8\x4e\xcd\xe0\xc3\x71\x85\x4d\xb1\xc9\xfa\x8b\x29\xde\x9e\xd7\x2b\x43\x1f\xda\x70\x6c\xcd\x26\xf9\x14\x43\xd2\x7e\x37\xd9\x23\xdf\xe8\xdc\xe0\x9c\x30\x15\xfc\x60\x5b\xd2\x75\x12\xbe\x32\xb5\xbf\xa0\x12\x13\xd3\xf1\x55\x55\x5b\x05\xfb\x54\xfc\x50\x30\x91\x62\xdc\x58\xc7\xa4\xef\x90\xfe\xd9\xf2\x56\xf2\x77\x2e\x65\x2b\x01\x3d\xb9\x7b\x4b\xf4\x30\x31\x8d\x69\x37\x65\x34\x29\x7b\x10\x90\x72\x8d\xd7\xd5\x50\x81\x81\xc1\x9c\x68\x9b\xfe\xe4\xf1\xaa\xdc\xa7\xc3\xc5\x35\x48\xb4\x2a\xb2\x49\xfd\xf4\x62\x3f\x3e\xe9\x97\x3b\x06\xa2\xa6\xbc\xca\x7e\xd9\xf7\x2c\x58\x11\xc1\xcf\x6a\x57\xe8\xe9\xeb\xb3\x37\x9e\xad\x86\x5a\x5c\x1d\x6e\x45\x17\xbc\x8c\x34\xae\x4e\xd3\x18\x29\x19\x99\x21\x53\xb1\xa9\x77\x14\x34\x7f\x24\x4d\xa9\xb1\x1b\xc2\x5e\x77\x54\xd3\x3b\x77\x5b\xb0\xe5\x9a\xf3\x2b\x4c\xb4\x77\x87\xaa\x35\x0c\x51\x50\x10\x2a\xed\x65\x1a\xab\xca\x05\x61\x25\xfa\x0a\x83\xa7\x24\x8c\xbd\x9a\x6f\x72\x92\x95\xc5\xdb\x4e\xbc\xc1\x39\x4a\xe4\x89\xf7\x70\x0d\x56\xbb\x46\x5b\xd5\x50\x45\x8d\xf9\xd5\x73\x94\x22\x66\x96\xc2\xa7\x6f\xaf\x91\x7b\x1a\x11\x7d\x4d\x04\x77\x5a\x53\x6b\x86\xc0\x57\xba\x3a\x06\xeb\x55\x79\x4e\xf2\x6a\x19\xd1\x88\xdd\x79\x49\x30\x75\x8b\x78\xbf\x64\x7e\xf0\xab\x59\x27\x11\x2d\xe4\x23\x78\x71\x7e\x31\x1e\x8f\x1f\x56\x35\x77\x74\x62\x55\x9a\xb6\xf1\xf4\x99\x4f\x4b\xcc\xdf\x32\x9a\x64\x50\x57\xec\x0e\x98\x71\x40\xae\xa9\x5e\xc2\x83\x70\xb5\xe9\x51\x75\x10\x43\x3d\x84\x1b\x52\x87\x2b\x89\xb6\x5d\xd4\xd4\x75\xd7\x5d\xe6\xe2\xd7\x71\xe7\xe2\x45\x91\xd7\xf7\xaa\xfc\x44\x4a\xb2\x8c\x52\xa9\xc6\xbc\x35\x83\xd7\x9e\x47\x0e\x55\x1f\xf7\xf5\xf4\xbf\xcb\x19\x4a\x8e\x1a\xd5\x48\xe9\x25\xc3\x48\x1a\xd0\x92\x24\xef\x8d\xd4\x71\xfc\xa7\x5e\xcc\xa5\x44\x93\x5d\x6d\x10\xfe\xf8\xb3\x97\x24\xb8\xc6\x46\xd5\xc9\x1f\xd3\x0e\x3c\x50\x5a\x96\xf6\x2a\x1d\x2c\x88\xa4\xa8\x60\xb6\xac\x67\xd0\xb2\xc0\x7a\x62\xe4\xa8\xc9\x40\xd9\xba\x66\xb6\xf9\x7b\x27\x66\x6f\xad\x35\xb7\xd8\x44\xcf\x14\xaf\x0b\xa8\x82\x24\xc3\x4b\x11\xad\x31\x2f\x74\x77\xfe\xd5\xa9\x0b\x6b\xa6\xaf\xbd\x31\x23\x9d\x9f\x1d\xdc\xbe\x99\x31\x0c\xb7\x2c\x94\x96\xab\xc6\xb6\xa6\x40\x63\x6c\x7f\xf6\x43\x03\x64\x26\x4a\xdd\x40\x81\x77\x62\x36\xc1\x0f\x98\x94\x11\x1a\xe1\x07\x62\x04\x5c\x45\x1d\x58\x04\x5c\x58\x0b\x07\x95\x0f\xef\x33\xb2\xd1\xba\x1a\xeb\x4e\x3d\x5d\x10\xca\xc8\x8c\x61\x5b\xe1\x06\x8c\xe1\xf8\x52\x96\x78\xdc\xf8\x24\x91\x28\x3b\x97\x19\x0b\x15\x5b\x8b\x2a\x88\x52\x98\x36\xb2\xe6\xa8\x94\x61\x3c\x36\xef\x93\x93\x28\x3b\x68\x54\xba\xb3\xcc\x86\xbe\xda\xfe\x56\xac\x6b\x6a\x16\x16\x8f\x4f\x47\x27\xe6\xbf\xcb\x93\x93\xa9\xfd\xef\x7f\x9b\x02\xbb\x68\xe5\xc0\xec\x43\xb5\x66\x08\x7b\x4b\xf2\x01\x3a\xfb\xb5\x56\xc0\x3b\x31\x73\xb4\xbf\x4f\x67\xdd\x79\xdd\xda\x48\xa9\x79\xc9\xd8\xf2\x0e\x2a\xee\x27\x24\xac\x11\xe8\xdd\x45\x6f\x99\xad\x64\x39\x48\x6f\x54\x75\x66\xff\xac\xda\x6a\x23\xb8\xbf\xad\x2d\xbd\x13\x59\x9e\x4e\xab\xa4\x5f\x0d\xe5\x3a\x5d\x93\xf7\x71\x2b\xef\xe3\xb5\xbe\xa2\x02\xff\x7a\x90\x46\xef\xc4\xac\x23\x87\x43\xfb\xc0\xbd\x63\x9f\xe7\x81\xbd\xa1\xad\x08\xa0\x37\xeb\xaa\x01\xcd\x9b\xb3\x57\xa0\x0c\xa7\x27\x6a\x95\x31\x36\xe2\xa0\x77\x98\x3e\x86\x1d\x18\x9a\x1a\x26\x36\xa7\xb8\x1e\xff\xf7\x4b\x07\xc3\xc6\x52\x44\x0b\x63\xae\x2a\x71\xce\x30\xd1\x9f\x8b\xd7\x7d\x4a\x36\xb6\x0b\xd5\xda\xb6\xe1\xff\x12\x33\xa8\xf8\x40\xd5\xd0\xea\xd4\x7e\x49\xb7\x9e\xd1\x56\xd9\x6b\x26\xe7\xca\x60\xec\x65\xcc\x3a\x37\x97\xb6\x5a\x19\xd6\xc1\x36\xbf\xfe\xd8\xb2\xdf\x2b\x91\x18\x93\x58\x43\x72\x47\xce\xd9\xb2\x23\x67\x23\xb1\x13\x02\x9a\x67\x2f\x3a\x34\x1c\xb9\x8d\xcd\x08\x10\x4c\xd4\xb4\x3e\x32\x2b\xaa\xea\x5b\xe4\x4e\x6e\x53\x4d\xdd\xdb\x2d\xaa\xc1\x9c\x50\xb6\x22\xc9\x2d\xaa\xd8\x0f\x6e\xe5\x94\xd3\xbc\xcc\x9b\x9e\xa7\x61\x58\x7f\x8d\x56\xb5\x14\x59\x6a\xbc\xbf\x17\x1f\x04\x87\xa4\x54\x5a\xe4\x28\x7d\x9a\x7a\x14\x80\xcd\x2d\x5f\x31\xbc\xc7\x12\x56\xf1\xdf\x50\x1b\xab\xfd\x26\xfc\x0e\xe5\xab\xe6\x55\x23\x66\x01\xc3\x6e\xd0\xde\xc2\x4a\xc2\x85\x69\x52\xdf\x2b\x5a\x59\x47\xbc\xa7\x3c\xad\x76\xc6\x6b\xbb\x4f\xab\xbd\xf7\xd1\xe9\xe3\x27\x55\x7a\x66\x5b\xcb\xb4\x2e\xd4\x74\x32\x21\x05\x1d\xd7\xbb\x34\xe3\x44\xe4\x8d\x63\x54\x5d\x15\x34\xb6\xf5\x1b\x5f\x56\x23\x59\xc8\x17\x54\x0a\x9e\x23\xd7\x96\xb5\xa4\x65\xd2\xb0\x2e\x00\x8d\x24\x9f\x42\xc1\x88\x36\x93\xa0\xd6\x73\x81\x49\x03\x50\xb7\xe0\x66\x5b\xf0\xb2\x6e\x2e\xb8\x3a\xc8\xf6\x95\x87\x29\xd8\xa7\x1d\xa2\x64\x46\x94\x7e\xab\x25\xe1\xca\x7a\x83\xb7\xc6\x10\x07\xf0\x9b\x8d\x54\x74\x2b\x1e\x1e\x1e\x8e\x68\x82\xf7\x46\x76\xbe\x7e\x39\xb8\x96\xa4\x6f\xb5\x24\xdc\x6d\x51\x38\x88\xaf\x6f\xcd\xd8\xb7\xd4\x6c\x43\x8b\x29\x57\xfb\xd7\xdf\x8b\xf3\x0b\x90\x98\x08\x99\xaa\xd0\x97\x3e\xe5\x39\x28\x1a\xa7\x22\x27\x94\x1b\x43\xed\x29\xf0\xd9\xf5\x16\x39\x20\x28\x95\x83\x88\xff\xf0\xa0\x65\xe4\xee\x72\x33\xfd\x19\x23\x00\xdc\x92\x20\x18\x58\x8c\x7e\x1a\xd8\x8a\xbd\x7a\x81\xc9\x1a\x9f\x6e\x31\x75\x08\x9f\xf7\x1b\x15\x6d\xf4\x6d\xc7\xdb\x7a\xca\xe7\x94\xbf\x44\x7e\xad\xb3\xe6\x10\xe5\xe4\x43\x48\xfe\xb7\x27\x31\x0c\x99\xa5\x98\xe4\x53\xf8\xbf\xbf\x91\xd1\x3f\x4e\x46\xdf\x8f\xfe\xfe\xaf\x7f\xe9\x73\x8d\xe1\xa8\x94\x11\x05\x1e\x94\x9c\xfe\x5e\x46\x71\x45\x0b\xb0\x31\x8a\x34\xbd\x12\x0c\xf5\x4c\x17\x05\x26\xcf\x85\xc4\xde\x98\x6c\x74\x98\x2e\x1a\xcf\xea\x2a\x24\x69\xf9\x03\x80\xe7\x2f\x7f\x86\x54\xa0\xe2\xc7\x1a\xde\x73\x71\x03\x99\xb8\xb1\x67\xd0\x78\x4e\xa4\xca\x08\x63\xd5\xcb\x5f\x8f\x80\x6a\xfb\xfa\x98\x3b\xbe\xe6\x9e\x31\x9b\x0b\x79\x43\x8c\x41\x69\x51\x3f\xaa\xa3\x05\xa4\xc2\xbf\xaa\xd3\x5c\xfe\x02\x3c\x2b\xb5\x6d\xd4\xbe\xca\xe3\xc1\xc9\xbf\x2e\x66\xfb\x0a\x33\x9c\x0b\x89\x40\x92\x04\x8b\x8a\x39\xf8\xed\xeb\x5d\x9c\x7a\xb4\x57\xd4\x60\xc0\x43\x17\xb6\x9f\x88\x41\x7c\x7a\x47\x1f\x3d\x9e\x75\x27\x56\x41\xad\x17\x52\x6e\xb7\xfe\x09\x96\xb4\xa6\xe7\xbd\x4f\x68\x99\xe9\x99\x74\x59\x56\x74\xde\xd3\xce\x5e\x63\x22\xcb\x48\x43\x40\x54\x65\x4d\x86\x3b\x87\xc8\xd2\x88\x5c\x73\xa1\x74\x78\x7a\xcf\xfc\x5d\x66\x54\xb9\x67\xb5\x0a\x89\x0a\x79\x75\xc6\xd1\xbf\x38\xe8\x8f\xae\x1a\xab\x63\x4c\xdc\xc0\x9c\xe1\x07\x7f\xb4\x6c\xdc\xe8\xe7\x2e\xdb\x7e\x96\x80\x45\xbf\xbb\x98\x57\xf4\x79\x7d\x50\xa7\x73\x95\x18\xa0\x62\xcd\x24\x72\x2c\x70\x33\xaa\x23\x2f\xf3\x36\x94\x86\x87\xc1\x5a\xc9\x5d\x69\x3f\x12\xca\x1a\x6e\x7d\x2d\x9e\xfa\x3d\xba\x30\xfa\xee\x3d\xb2\x07\x9c\x68\xba\x40\x1b\xdf\x98\x99\x84\x44\xb0\x32\xe7\x0f\xc7\x8d\xb2\xbf\xf8\x85\xd2\x4d\x86\xbc\x86\x4a\x77\xe9\xa0\xf5\xf6\x98\xfb\x3b\x17\xda\xf6\x37\x7a\xf9\xac\x7a\x77\x92\x5c\x5f\x4b\xbc\xb6\xf5\x2d\x28\xde\xc0\x5c\x8a\xbc\x71\x59\xa2\x8e\x20\x8d\x23\x20\xe9\x20\xd0\xb7\x59\xfd\xae\x57\xd6\x6f\xa6\xa3\x41\x53\xa6\x6d\xa8\xdb\xc6\x14\x1e\x84\x95\x63\xbd\x1b\xa7\x1e\x19\x6a\x62\x53\xaa\x57\x2b\xe7\x42\xc2\x8c\x24\xef\xc5\x7c\xfe\x70\x5b\x7d\x02\x75\x0b\x4b\xa7\xbc\x24\x23\xfc\x1a\x6b\x5e\xf9\x31\xc3\x7e\x2d\x55\xbc\x34\xbd\x8f\x82\x7f\x85\x14\x09\x2a\xd5\xec\xfc\xed\x66\x8b\x05\x8c\xf0\xd4\x9d\x3d\x7f\x27\x6e\x50\x69\x70\x87\x0a\xc4\xbc\x71\xe9\xe5\x58\x75\xf5\xde\xe5\x55\xad\x6a\x83\xfa\x5c\x45\x54\x81\x99\xe2\x76\xbd\x6f\x43\x98\x1d\x36\xbf\xa7\xd8\xca\x7a\x95\x5e\xd2\xfa\x38\x80\x9d\x5c\x21\xec\x1d\x94\xe4\x4c\x62\x2f\xba\x7d\xc5\x52\xa3\xca\xab\xba\x63\x57\xae\xfa\x78\x17\x72\x25\x78\xdd\xbd\x58\xdc\x6f\x10\xb7\x5a\x0f\x1d\x6f\x72\x5a\xc1\x5f\x39\xc3\x08\xf8\x61\x43\x39\x2d\xec\x50\xd1\x33\x90\x76\x56\x85\x47\x1f\x55\xdf\xb3\x8e\x96\x09\x3e\x7f\x7a\x7e\xfe\xea\xd2\xcc\xbe\x5c\xa4\x74\x4e\x31\x85\xd4\x1e\x3e\x63\xcb\xf6\xb3\x8f\x11\x7a\xc5\xa3\x14\x84\xf3\xe1\x23\x7b\xcb\x04\x53\xe3\xe3\xae\xa2\xbb\x66\xf1\x1d\xb3\xab\x48\xd4\xd7\x01\x1b\x7f\x2f\x69\xf2\x1e\xc4\xc2\xc8\x86\x37\x66\x8e\xdc\xd8\x20\x3c\xa9\x5f\xb4\x5d\x60\x7d\xdb\xcb\xde\x2e\xa8\x05\xb2\x93\xdd\xbb\xd0\xa0\xe5\x5b\x6f\x7b\x98\x82\xf1\x72\xa8\x79\x8a\x63\xe3\xb6\xc8\xa0\xe0\xeb\x47\xd8\x2b\x69\xac\xe2\x6c\xc1\x4d\x15\xad\x06\x73\xb6\xf0\xda\x1d\x8b\xf7\x11\x1c\xff\x48\x98\x5a\x49\xfd\x85\x9b\xc5\x0a\xef\x5d\x8d\x85\x91\x6b\xab\xda\x87\x02\xb6\xd4\xc7\xcf\x24\xc9\x28\xc7\x91\x44\x92\x1a\x63\xf3\xd5\x40\x22\xd2\x5a\x3f\x21\x76\xb0\x65\xdd\x3f\x95\x39\xe1\x75\xcd\xbe\x96\x8f\xe3\xa4\xf6\x14\x3b\x1f\x00\xcc\x16\x5a\x6d\x4c\xb7\x1a\x09\x7b\xc6\xa9\x23\x88\x32\x82\x33\x0e\xa4\x02\x62\x6f\x98\xf6\xe1\xec\xfa\x3c\x51\x04\xbd\x3f\xfc\x50\x77\xe3\x6a\xb5\x26\x7f\xcc\xee\x11\x5c\x45\xd9\xaa\x25\xae\xe3\x00\xee\x92\x0a\xc7\x9b\x0a\x6a\xba\x9d\xc5\xc7\x71\x64\xeb\x14\xe4\x5b\xfc\x64\x94\xed\x37\x27\x86\x85\xf6\x20\x47\x07\x55\x8b\x71\x3f\x60\xbe\xed\x81\x47\x6c\x4f\xad\x1e\x36\x61\xb3\x72\x4e\x5b\xe2\x67\x84\x96\x5d\xa0\x36\x08\x88\xba\x11\xa6\x1d\x3e\xdc\x6e\x6d\x1f\xfa\x15\xaf\xee\xfb\x2c\xc1\xad\xbb\xaa\xf6\x8c\xd7\x8b\x6e\x0e\xd6\xca\x76\x13\x8f\x37\x4f\x34\x52\x55\xf1\xe0\xe7\x2f\x7f\x36\x0e\xd0\x0f\x40\xcd\x66\x6b\x97\x7a\x65\x7f\xff\x60\xaf\x37\x5e\x79\x4e\x13\x9c\xb9\xa8\x79\xcd\x55\x25\xcb\x0f\x5a\x96\x78\xd5\x1c\xaa\x1d\x22\x7e\x07\x2f\x77\xf0\x72\x83\xeb\x3e\x78\xb9\x83\x97\xeb\xf3\x72\xfd\x71\xb6\x3c\x17\xdc\x9d\xe1\x9d\x0b\xd9\xbc\xb4\xad\x1e\x85\x87\xe3\x63\x4c\x72\xa8\x5b\x9d\xb5\xb6\x71\xea\x14\x28\x87\x17\xcf\x1e\x46\xd9\x7c\xd0\x17\x0a\xb2\x64\x82\x38\x39\xec\x7d\x9e\x2d\x2f\x13\xd0\xcd\xbb\x16\x83\xb7\x36\x1a\xb7\xe8\xfc\x0d\x81\xc6\xa6\x8a\xdd\x4a\xde\xb5\xa6\x5f\xde\x9c\xd5\xf6\x21\xd2\xcd\xf6\x3b\x10\x42\xaa\xbb\x84\x6f\x37\xe8\x62\x87\x28\x73\x6b\x4b\xa9\x88\x01\x63\x04\xaa\xbe\x22\x63\x7d\x8f\xd0\x84\x45\xbf\x37\x85\xa8\x07\x8d\x4b\xd1\x89\x74\x43\x71\x48\x45\x57\x7b\xb6\x2f\x6d\x3b\xb4\x7b\xf1\xad\x42\xf0\x61\x47\x20\xdc\x48\xf8\x02\x0e\x07\x89\x1b\x8e\xf2\xad\x0c\x57\x59\x54\x1f\xaf\x38\x1c\x11\x1a\x56\x45\x5b\x9f\x43\x0f\xd5\x74\x5e\x29\x82\xad\xcf\xe6\x34\x5f\x68\xdb\x69\x0b\xa7\x7a\x18\xb6\x62\xdf\x71\xca\x96\x53\x76\xbb\xfd\xe9\xbb\x76\x69\x6c\x90\x67\xea\x73\x15\x1d\x07\x64\xbf\x40\x27\x36\x70\xcb\xbf\xfb\xe5\xdb\x78\x93\xde\xb0\x83\xea\xfd\xef\x3d\xee\xd8\x57\x33\xfe\xb0\x65\x5f\x5b\xea\x6d\x0e\x93\x1f\x4c\xf6\x60\xb2\x07\x93\xfd\x6c\x26\xeb\x9e\x5a\xb8\x43\xcc\x70\x7b\xcc\x38\xf0\xc6\x3b\xc2\x1b\x0f\xf0\x7e\x80\xf7\x03\xbc\xef\x05\xde\xef\xcc\xe9\xbf\xf6\x3f\xdd\xb0\xe3\xf1\xbf\xf6\x24\xd8\xf5\xfc\x5f\xf5\xcf\x90\x7c\xb1\x07\x00\x9b\xd0\x79\xcb\x13\x80\xeb\xb7\x6e\x36\x9c\x0f\xfc\x1a\x4f\x00\x56\xc8\x75\xb7\x8e\x00\xee\xf9\xcc\xdb\xe6\x2b\x63\xf7\xfa\x20\xdc\x27\xd9\xbd\xaf\x66\xe2\x7e\x8f\x58\x1e\x0e\xf1\xf9\xbf\x2f\xe8\x10\x5f\x35\xd4\x9f\xf1\x14\x5f\x8b\xd8\xef\x7f\xc7\xec\xee\xb2\xe0\xe6\x83\x7b\x4d\xcd\x34\x1f\x37\x6b\x38\xac\x11\x90\xea\x1d\x3e\xf3\xcb\x3f\xcb\x17\x1e\xfe\xbc\x30\xd3\x25\xe8\x30\x7a\xf9\xb3\x51\x7d\xa6\x75\xe1\x13\xec\xf4\xc2\xa9\xcf\x7a\xe4\x5e\xdc\xb5\xc5\x47\x50\x4a\xe6\xf2\x4e\x27\x13\x26\x12\xc2\x32\xa1\xf4\xf4\xdf\x4f\x4e\xdc\xcb\x6f\x8d\xee\xbd\xc0\x05\x32\x51\xe4\xc8\xdd\x80\x2e\x88\xa4\x66\x92\xba\xed\xa2\x7f\x06\x00\x00\xff\xff\x9d\xec\xe5\x93\x8a\x81\x00\x00") + +func openapiYamlBytes() ([]byte, error) { + return bindataRead( + _openapiYaml, + "openapi.yaml", + ) +} + +func openapiYaml() (*asset, error) { + bytes, err := openapiYamlBytes() + if err != nil { + return nil, err + } + + info := bindataFileInfo{name: "openapi.yaml", size: 33162, mode: os.FileMode(420), modTime: time.Unix(1762937650, 0)} + a := &asset{bytes: bytes, info: info} + return a, nil +} + +// Asset loads and returns the asset for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func Asset(name string) ([]byte, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("Asset %s can't read by error: %v", name, err) + } + return a.bytes, nil + } + return nil, fmt.Errorf("Asset %s not found", name) +} + +// MustAsset is like Asset but panics when Asset would return an error. +// It simplifies safe initialization of global variables. +func MustAsset(name string) []byte { + a, err := Asset(name) + if err != nil { + panic("asset: Asset(" + name + "): " + err.Error()) + } + + return a +} + +// AssetInfo loads and returns the asset info for the given name. +// It returns an error if the asset could not be found or +// could not be loaded. +func AssetInfo(name string) (os.FileInfo, error) { + cannonicalName := strings.Replace(name, "\\", "/", -1) + if f, ok := _bindata[cannonicalName]; ok { + a, err := f() + if err != nil { + return nil, fmt.Errorf("AssetInfo %s can't read by error: %v", name, err) + } + return a.info, nil + } + return nil, fmt.Errorf("AssetInfo %s not found", name) +} + +// AssetNames returns the names of the assets. +func AssetNames() []string { + names := make([]string, 0, len(_bindata)) + for name := range _bindata { + names = append(names, name) + } + return names +} + +// _bindata is a table, holding each asset generator, mapped to its name. +var _bindata = map[string]func() (*asset, error){ + "openapi.yaml": openapiYaml, +} + +// AssetDir returns the file names below a certain +// directory embedded in the file by go-bindata. +// For example if you run go-bindata on data/... and data contains the +// following hierarchy: +// data/ +// foo.txt +// img/ +// a.png +// b.png +// then AssetDir("data") would return []string{"foo.txt", "img"} +// AssetDir("data/img") would return []string{"a.png", "b.png"} +// AssetDir("foo.txt") and AssetDir("notexist") would return an error +// AssetDir("") will return []string{"data"}. +func AssetDir(name string) ([]string, error) { + node := _bintree + if len(name) != 0 { + cannonicalName := strings.Replace(name, "\\", "/", -1) + pathList := strings.Split(cannonicalName, "/") + for _, p := range pathList { + node = node.Children[p] + if node == nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + } + } + if node.Func != nil { + return nil, fmt.Errorf("Asset %s not found", name) + } + rv := make([]string, 0, len(node.Children)) + for childName := range node.Children { + rv = append(rv, childName) + } + return rv, nil +} + +type bintree struct { + Func func() (*asset, error) + Children map[string]*bintree +} + +var _bintree = &bintree{nil, map[string]*bintree{ + "openapi.yaml": &bintree{openapiYaml, map[string]*bintree{}}, +}} + +// RestoreAsset restores an asset under the given directory +func RestoreAsset(dir, name string) error { + data, err := Asset(name) + if err != nil { + return err + } + info, err := AssetInfo(name) + if err != nil { + return err + } + err = os.MkdirAll(_filePath(dir, filepath.Dir(name)), os.FileMode(0755)) + if err != nil { + return err + } + err = ioutil.WriteFile(_filePath(dir, name), data, info.Mode()) + if err != nil { + return err + } + err = os.Chtimes(_filePath(dir, name), info.ModTime(), info.ModTime()) + if err != nil { + return err + } + return nil +} + +// RestoreAssets restores an asset under the given directory recursively +func RestoreAssets(dir, name string) error { + children, err := AssetDir(name) + // File + if err != nil { + return RestoreAsset(dir, name) + } + // Dir + for _, child := range children { + err = RestoreAssets(dir, filepath.Join(name, child)) + if err != nil { + return err + } + } + return nil +} + +func _filePath(dir, name string) string { + cannonicalName := strings.Replace(name, "\\", "/", -1) + return filepath.Join(append([]string{dir}, strings.Split(cannonicalName, "/")...)...) +} diff --git a/docs/continuous-delivery-migration.md b/docs/continuous-delivery-migration.md new file mode 100755 index 0000000..8e946c3 --- /dev/null +++ b/docs/continuous-delivery-migration.md @@ -0,0 +1,25 @@ +## CD Migration + +Migrations can be problematic when trying to move towards continuous deployment. At first glance it looks impossible to CD a breaking migration. Essentially the solution is always to break the migration into multiple smaller and safer steps. Note that migrations can and should be tested -- like any other code change. In AMS we have a lot of experience testing complicated migrations: + +https://gitlab.cee.redhat.com/service/uhc-account-manager/blob/master/pkg/db/README.md#migration-tests + +https://gitlab.cee.redhat.com/service/uhc-account-manager/blob/master/test/integration/migrations_test.go + +### Example + +In this example from AMS we drop the `subscription_managed` field from `resource_quota` table and AMS API. This change is not backwards compatible as any existing running image of AMS will error if we simply drop the column from the database table. The solution is to break this migration down into multiple steps. + +In AMS we deploy our services on pods running RollingUpdate. When deploying a code change each pod will roll and try to run migrations as part of the init container. We ensure migrations run only once by relying on [postgres' advisory lock](https://gitlab.cee.redhat.com/service/uhc-account-manager/blob/master/pkg/db/migrations.go#L193). + +In order to deploy this change in a CD way we must first merge the change removing support for the field in the API. That code change needs to propagate through to all production service pods and cronjob pods: + +https://issues.redhat.com/browse/SDB-849 + +https://gitlab.cee.redhat.com/service/uhc-account-manager/-/merge_requests/1213 + +Note that the merge request was merged Jan 22 2020. After the code change is fully deployed we drop the field from the database it is no longer used by any AMS code. Note that the second merge request was merged nearly 2 weeks later on Feb 3 2020: + +https://issues.redhat.com/browse/SDB-858 + +https://gitlab.cee.redhat.com/service/uhc-account-manager/-/merge_requests/1214 diff --git a/docs/dao.md b/docs/dao.md new file mode 100755 index 0000000..35f0e58 --- /dev/null +++ b/docs/dao.md @@ -0,0 +1,7 @@ +**DAO** stands for Data Access Object. It is used to separate the data persistence logic in a separate layer, which is known as ***Separation of Logic***. + +DAO pattern emphasises the low coupling between different components of an application. None of layers depend on it, but only `services` layer. (The most proper way would be using interfaces and not a concrete implementation. We're not using interfaces as there are no plans to replace implementation in foreseeable future.) + +As the persistence logic is completely separate, it is much easier to write Unit tests for individual components. It is quite easy to mock data for an individual component of the application. + +The DAO layer implementation resides in package `dao`, and correspondent mocks in package `mocks`. An example of a Unit test may be found in `pkg/services/dinosaurs_test.go`. diff --git a/docs/testcontainers.md b/docs/testcontainers.md new file mode 100755 index 0000000..e2405c0 --- /dev/null +++ b/docs/testcontainers.md @@ -0,0 +1,57 @@ +# Testcontainers + +hyperfleet uses https://github.com/testcontainers/testcontainers-go/ for integration tests to spin up ephemeral containers for tests. + +The containers used by the tests are initialized/destroyed in the `integration_testing` environment. + + +## Compatibility with podman + +testcontainers project only supports Docker officially and some errors can appear with podman. + +If you encounter the following error: + +``` +Failed to start PostgreSQL testcontainer: create container: container create: Error response from daemon: container create: unable to find network with name or ID bridge: network not found: creating reaper failed +``` +It can happen because testcontainers spin up an additional [testcontainers/ryuk](https://github.com/testcontainers/moby-ryuk) container that manages the lifecycle of the containers used in the tests and performs cleanup in case there are fails. + + +One way to bypass this problem is not to use ryuk setting the environment variable + +```bash +TESTCONTAINERS_RYUK_DISABLED=true +``` + +Or setting a property in `~/.testcontainers.properties` + +``` +ryuk.disabled=true +``` + +Ryuk needs to execute with root permissions in the podman machine to manage other containers. This [issue](https://github.com/testcontainers/testcontainers-go/issues/2781#issuecomment-2619626043) in testcontainer's repository offers an alternative solution. Be mindful of the elevated permissions required: + +```bash +# verify socket path inside podman machine +$ podman machine ssh +Connecting to vm podman-machine-default. To close connection, use `~.` or `exit` +Fedora CoreOS 40.20240808.2.0 + +root@localhost:~# ls -al /var/run/podman/podman.sock +srw-rw----. 1 root root 0 Dec 20 14:32 /var/run/podman/podman.sock +exit + +# On the host machine +$ sudo mkdir /var/run/podman +$ sudo ln -s /Users/Your.User/.local/share/containers/podman/machine/podman.sock /var/run/podman/podman.sock + +export DOCKER_HOST="unix:///var/run/podman/podman.sock" +export TESTCONTAINERS_RYUK_CONTAINER_PRIVILEGED=true + +# if it still fails, give permissions to /var/run/podman/podman.sock within the podman machine +$ sudo chmod a+xrw /var/run/podman +$ sudo chmod a+xrw /var/run/podman/podman.sock +``` + + + diff --git a/go.mod b/go.mod new file mode 100755 index 0000000..63b5136 --- /dev/null +++ b/go.mod @@ -0,0 +1,128 @@ +module github.com/openshift-hyperfleet/hyperfleet-api + +go 1.24.0 + +toolchain go1.24.9 + +require ( + github.com/DATA-DOG/go-sqlmock v1.5.2 + github.com/Masterminds/squirrel v1.1.0 + github.com/auth0/go-jwt-middleware v0.0.0-20190805220309-36081240882b + github.com/bxcodec/faker/v3 v3.2.0 + github.com/docker/go-healthcheck v0.1.0 + github.com/ghodss/yaml v1.0.0 + github.com/go-gormigrate/gormigrate/v2 v2.0.0 + github.com/golang-jwt/jwt/v4 v4.5.0 + github.com/golang/glog v1.2.5 + github.com/google/uuid v1.6.0 + github.com/gorilla/handlers v1.4.2 + github.com/gorilla/mux v1.7.3 + github.com/jinzhu/inflection v1.0.0 + github.com/lib/pq v1.10.9 + github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103 + github.com/onsi/gomega v1.27.1 + github.com/openshift-online/ocm-sdk-go v0.1.334 + github.com/prometheus/client_golang v1.16.0 + github.com/segmentio/ksuid v1.0.2 + github.com/spf13/cobra v0.0.5 + github.com/spf13/pflag v1.0.5 + github.com/testcontainers/testcontainers-go v0.33.0 + github.com/testcontainers/testcontainers-go/modules/postgres v0.33.0 + github.com/yaacov/tree-search-language v0.0.0-20190923184055-1c2dad2e354b + gopkg.in/resty.v1 v1.12.0 + gorm.io/datatypes v1.2.7 + gorm.io/driver/postgres v1.6.0 + gorm.io/gorm v1.30.0 +) + +require ( + dario.cat/mergo v1.0.2 // indirect + filippo.io/edwards25519 v1.1.0 // indirect + github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Microsoft/go-winio v0.6.2 // indirect + github.com/antlr/antlr4 v0.0.0-20190518164840-edae2a1c9b4b // indirect + github.com/aymerick/douceur v0.2.0 // indirect + github.com/beorn7/perks v1.0.1 // indirect + github.com/cenkalti/backoff/v4 v4.2.1 // indirect + github.com/cespare/xxhash/v2 v2.3.0 // indirect + github.com/containerd/errdefs v1.0.0 // indirect + github.com/containerd/errdefs/pkg v0.3.0 // indirect + github.com/containerd/log v0.1.0 // indirect + github.com/containerd/platforms v0.2.1 // indirect + github.com/cpuguy83/dockercfg v0.3.2 // indirect + github.com/dgrijalva/jwt-go v3.2.0+incompatible // indirect + github.com/distribution/reference v0.6.0 // indirect + github.com/docker/distribution v2.8.1+incompatible // indirect + github.com/docker/docker v28.5.1+incompatible // indirect + github.com/docker/go-connections v0.6.0 // indirect + github.com/docker/go-units v0.5.0 // indirect + github.com/felixge/httpsnoop v1.0.4 // indirect + github.com/go-logr/logr v1.4.3 // indirect + github.com/go-logr/stdr v1.2.2 // indirect + github.com/go-ole/go-ole v1.2.6 // indirect + github.com/go-sql-driver/mysql v1.8.1 // indirect + github.com/golang/protobuf v1.5.4 // indirect + github.com/google/go-cmp v0.7.0 // indirect + github.com/gorilla/css v1.0.0 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 // indirect + github.com/inconshreveable/mousetrap v1.0.0 // indirect + github.com/jackc/pgpassfile v1.0.0 // indirect + github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 // indirect + github.com/jackc/pgx/v5 v5.6.0 // indirect + github.com/jackc/puddle/v2 v2.2.2 // indirect + github.com/jinzhu/now v1.1.5 // indirect + github.com/json-iterator/go v1.1.12 // indirect + github.com/klauspost/compress v1.18.0 // indirect + github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 // indirect + github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 // indirect + github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect + github.com/magiconair/properties v1.8.10 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/microcosm-cc/bluemonday v1.0.23 // indirect + github.com/moby/docker-image-spec v1.3.1 // indirect + github.com/moby/go-archive v0.1.0 // indirect + github.com/moby/patternmatcher v0.6.0 // indirect + github.com/moby/sys/atomicwriter v0.1.0 // indirect + github.com/moby/sys/sequential v0.6.0 // indirect + github.com/moby/sys/user v0.4.0 // indirect + github.com/moby/sys/userns v0.1.0 // indirect + github.com/moby/term v0.5.0 // indirect + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.2 // indirect + github.com/morikuni/aec v1.0.0 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/opencontainers/image-spec v1.1.1 // indirect + github.com/pkg/errors v0.9.1 // indirect + github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect + github.com/prometheus/client_model v0.3.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.10.1 // indirect + github.com/shirou/gopsutil/v3 v3.23.12 // indirect + github.com/shoenig/go-m1cpu v0.1.6 // indirect + github.com/sirupsen/logrus v1.9.3 // indirect + github.com/smartystreets/goconvey v1.8.1 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect + github.com/urfave/negroni v1.0.0 // indirect + github.com/yusufpapurcu/wmi v1.2.4 // indirect + go.opentelemetry.io/auto/sdk v1.1.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.38.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 // indirect + go.opentelemetry.io/otel/metric v1.38.0 // indirect + go.opentelemetry.io/otel/sdk v1.38.0 // indirect + go.opentelemetry.io/otel/trace v1.38.0 // indirect + golang.org/x/crypto v0.41.0 // indirect + golang.org/x/net v0.43.0 // indirect + golang.org/x/sync v0.17.0 // indirect + golang.org/x/sys v0.36.0 // indirect + golang.org/x/text v0.29.0 // indirect + golang.org/x/time v0.3.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20251014184007-4626949a642f // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20251014184007-4626949a642f // indirect + google.golang.org/grpc v1.75.1 // indirect + google.golang.org/protobuf v1.36.10 // indirect + gopkg.in/yaml.v2 v2.4.0 // indirect + gopkg.in/yaml.v3 v3.0.1 // indirect + gorm.io/driver/mysql v1.5.6 // indirect +) diff --git a/go.sum b/go.sum new file mode 100755 index 0000000..dc5689a --- /dev/null +++ b/go.sum @@ -0,0 +1,1049 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.37.4/go.mod h1:NHPJ89PdicEuT9hdPXMROBD91xc5uRDxsMtSB16k7hw= +cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= +cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= +cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= +cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= +cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= +cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= +cloud.google.com/go v0.50.0/go.mod h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= +cloud.google.com/go v0.52.0/go.mod h1:pXajvRH/6o3+F9jDHZWQ5PbGhn+o8w9qiu/CffaVdO4= +cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6M= +cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= +cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= +cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= +cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= +cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= +cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= +cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= +cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= +cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= +cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= +cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= +cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= +cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= +cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= +cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= +cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= +cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= +cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= +cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= +cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= +cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= +cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= +dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8= +dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA= +dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= +filippo.io/edwards25519 v1.1.0 h1:FNf4tywRC1HmFuKW5xopWpigGjJKiJSV0Cqo0cJWDaA= +filippo.io/edwards25519 v1.1.0/go.mod h1:BxyFTGdWcka3PhytdK4V28tE5sGfRvvvRV7EaN4VDT4= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8afgbRMd7mFxO99hRNu+6tazq8nFF9lIwo9JFroBk= +github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8= +github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= +github.com/DATA-DOG/go-sqlmock v1.5.2 h1:OcvFkGmslmlZibjAjaHm3L//6LiuBgolP7OputlJIzU= +github.com/DATA-DOG/go-sqlmock v1.5.2/go.mod h1:88MAG/4G7SMwSE3CeA0ZKzrT5CiOU3OJ+JlNzwDqpNU= +github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= +github.com/Masterminds/squirrel v1.1.0 h1:baP1qLdoQCeTw3ifCdOq2dkYc6vGcmRdaociKLbEJXs= +github.com/Masterminds/squirrel v1.1.0/go.mod h1:yaPeOnPG5ZRwL9oKdTsO/prlkPbXWZlRVMQ/gGlzIuA= +github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY= +github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU= +github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= +github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= +github.com/antlr/antlr4 v0.0.0-20190518164840-edae2a1c9b4b h1:IyTcB1l64U991qSZ0ufqiJv9GVEOUBiSPwsObDm7+cc= +github.com/antlr/antlr4 v0.0.0-20190518164840-edae2a1c9b4b/go.mod h1:T7PbCXFs94rrTttyxjbyT5+/1V8T2TYDejxUfHJjw1Y= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/auth0/go-jwt-middleware v0.0.0-20190805220309-36081240882b h1:CvoEHGmxWl5kONC5icxwqV899dkf4VjOScbxLpllEnw= +github.com/auth0/go-jwt-middleware v0.0.0-20190805220309-36081240882b/go.mod h1:LWMyo4iOLWXHGdBki7NIht1kHru/0wM179h+d3g8ATM= +github.com/aymerick/douceur v0.2.0 h1:Mv+mAeH1Q+n9Fr+oyamOlAkUNPWPlA8PPGR0QAaYuPk= +github.com/aymerick/douceur v0.2.0/go.mod h1:wlT5vV2O3h55X9m7iVYN0TBM0NH/MmbLnd30/FjWUq4= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= +github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/bxcodec/faker/v3 v3.2.0 h1:L3cTa9Tptyk0jsF/R6RooDZwxwA8dDi6IWdkIu8jwKo= +github.com/bxcodec/faker/v3 v3.2.0/go.mod h1:gF31YgnMSMKgkvl+fyEo1xuSMbEuieyqfeslGYFjneM= +github.com/cenkalti/backoff/v4 v4.1.3/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInqkPWOWmG2CLw= +github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= +github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= +github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM= +github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= +github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= +github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= +github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= +github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= +github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI= +github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= +github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= +github.com/containerd/errdefs v1.0.0 h1:tg5yIfIlQIrxYtu9ajqY42W3lpS19XqdxRQeEwYG8PI= +github.com/containerd/errdefs v1.0.0/go.mod h1:+YBYIdtsnF4Iw6nWZhJcqGSg/dwvV7tyJ/kCkyJ2k+M= +github.com/containerd/errdefs/pkg v0.3.0 h1:9IKJ06FvyNlexW690DXuQNx2KA2cUJXx151Xdx3ZPPE= +github.com/containerd/errdefs/pkg v0.3.0/go.mod h1:NJw6s9HwNuRhnjJhM7pylWwMyAkmCQvQ4GpJHEqRLVk= +github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I= +github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo= +github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A= +github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= +github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA= +github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc= +github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= +github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= +github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY= +github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20190423183735-731ef375ac02/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/denisenkom/go-mssqldb v0.0.0-20190515213511-eb9f6a1743f3/go.mod h1:zAg7JM8CkOJ43xKXIj7eRO9kmWm/TW578qo+oDO6tuM= +github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= +github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68= +github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w= +github.com/docker/docker v28.5.1+incompatible h1:Bm8DchhSD2J6PsFzxC35TZo4TLGR2PdW/E69rU45NhM= +github.com/docker/docker v28.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk= +github.com/docker/go-connections v0.6.0 h1:LlMG9azAe1TqfR7sO+NJttz1gy6KO7VJBh+pMmjSD94= +github.com/docker/go-connections v0.6.0/go.mod h1:AahvXYshr6JgfUJGdDCs2b5EZG/vmaMAntpSFH5BFKE= +github.com/docker/go-healthcheck v0.1.0 h1:6ZrRr63F5LLsPwSlbZgjgoxNu+o1VlMIhCQWgbfrgU0= +github.com/docker/go-healthcheck v0.1.0/go.mod h1:3v7a0338vhH6WnYFtUd66S+9QK3M6xK4sKr7gGrht6o= +github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= +github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= +github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= +github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= +github.com/erikstmartin/go-testdb v0.0.0-20160219214506-8d10e4a1bae5/go.mod h1:a2zkGnVExMxdzMo3M0Hi/3sEU+cWnZpSni0O6/Yb/P0= +github.com/evanphx/json-patch/v5 v5.6.0 h1:b91NhWfaz02IuVxO9faSllyAtNXHMPkC5J8sJCLunww= +github.com/evanphx/json-patch/v5 v5.6.0/go.mod h1:G79N1coSVB93tBe7j6PhzjmR3/2VvlbKOFpnXhI9Bw4= +github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= +github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= +github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= +github.com/go-gormigrate/gormigrate/v2 v2.0.0 h1:e2A3Uznk4viUC4UuemuVgsNnvYZyOA8B3awlYk3UioU= +github.com/go-gormigrate/gormigrate/v2 v2.0.0/go.mod h1:YuVJ+D/dNt4HWrThTBnjgZuRbt7AuwINeg4q52ZE3Jw= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= +github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI= +github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= +github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= +github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= +github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-sql-driver/mysql v1.8.1 h1:LedoTUt/eveggdHS9qUFC1EFSa8bU2+1pZjSRpvNJ1Y= +github.com/go-sql-driver/mysql v1.8.1/go.mod h1:wEBSXgmK//2ZFJyE+qWnIsVGmvmEKlqwuVSjsCm7DZg= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= +github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang-jwt/jwt/v4 v4.4.1/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-jwt/jwt/v4 v4.5.0 h1:7cYmW1XlMY7h7ii7UhUyChSgS5wUJEnm9uZVTGqOWzg= +github.com/golang-jwt/jwt/v4 v4.5.0/go.mod h1:m21LjoU+eqJr34lmDMbreY2eSTRJ1cv77w39/MY0Ch0= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9 h1:au07oEsX2xN0ktxqI+Sida1w446QrXBRJ0nee3SNZlA= +github.com/golang-sql/civil v0.0.0-20220223132316-b832511892a9/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-sql/sqlexp v0.1.0 h1:ZCD6MBpcuOVfGVqsEmY5/4FtYiKz6tSyUv9LPEDei6A= +github.com/golang-sql/sqlexp v0.1.0/go.mod h1:J4ad9Vo8ZCWQ2GMrC4UCQy1JpCbwU9m3EOqtpKwwwHI= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= +github.com/golang/glog v1.2.5 h1:DrW6hGnjIhtvhOIiAKT6Psh/Kd/ldepEa81DKeiRJ5I= +github.com/golang/glog v1.2.5/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y= +github.com/golang/mock v1.4.0/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= +github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk= +github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= +github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= +github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= +github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w= +github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= +github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= +github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= +github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= +github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/golang/snappy v0.0.1/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= +github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= +github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= +github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= +github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= +github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= +github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= +github.com/gopherjs/gopherjs v1.17.2 h1:fQnZVsXk8uxXIStYb0N4bGk7jeyTalG/wsZjQ25dO0g= +github.com/gopherjs/gopherjs v1.17.2/go.mod h1:pRRIvn/QzFLrKfvEz3qUuEhtE/zLCWfreZ6J5gM2i+k= +github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/css v1.0.0 h1:BQqNyPTi50JCFMTw/b67hByjMVXZRwGha6wxVGkeihY= +github.com/gorilla/css v1.0.0/go.mod h1:Dn721qIggHpt4+EFCcTLTU/vk5ySda2ReITrtgBl60c= +github.com/gorilla/handlers v1.4.2 h1:0QniY0USkHQ1RGCLfKxeNHK9bkDHGRYGNDFBCS+YARg= +github.com/gorilla/handlers v1.4.2/go.mod h1:Qkdc/uu4tH4g6mTK6auzZ766c4CA0Ng8+o/OAirnOIQ= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/gorilla/mux v1.7.3 h1:gnP5JzjVOuiZD07fKKToCAOjS0yOpj/qPETTXCCS6hw= +github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= +github.com/graphql-go/graphql v0.7.8/go.mod h1:k6yrAYQaSP59DC5UVxbgxESlmVyojThKdORUqGDGmrI= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3 h1:NmZ1PKzSTQbuGHw9DGPFomqkkLWMC+vZCkfs+FHv1Vg= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.3/go.mod h1:zQrxl1YP88HQlA6i9c63DSVPFklWpGX4OWAc9bFuaH4= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/hokaccha/go-prettyjson v0.0.0-20180920040306-f579f869bbfe/go.mod h1:pFlLw2CfqZiIBOx6BuCeRLCrfxBJipTY0nIOF/VbGcI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= +github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM= +github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= +github.com/itchyny/gojq v0.12.7 h1:hYPTpeWfrJ1OT+2j6cvBScbhl0TkdwGM4bc66onUSOQ= +github.com/itchyny/gojq v0.12.7/go.mod h1:ZdvNHVlzPgUf8pgjnuDTmGfHA/21KoutQUJ3An/xNuw= +github.com/itchyny/timefmt-go v0.1.3 h1:7M3LGVDsqcd0VZH2U+x393obrzZisp7C0uEe921iRkU= +github.com/itchyny/timefmt-go v0.1.3/go.mod h1:0osSSCQSASBJMsIZnhAaF1C2fCBTJZXrnj37mG8/c+A= +github.com/jackc/chunkreader v1.0.0 h1:4s39bBR8ByfqH+DKm8rQA3E1LHZWB9XWcrz8fqaZbe0= +github.com/jackc/chunkreader v1.0.0/go.mod h1:RT6O25fNZIuasFJRyZ4R/Y2BbhasbmZXF9QQ7T3kePo= +github.com/jackc/chunkreader/v2 v2.0.0/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/chunkreader/v2 v2.0.1 h1:i+RDz65UE+mmpjTfyz0MoVTnzeYxroil2G82ki7MGG8= +github.com/jackc/chunkreader/v2 v2.0.1/go.mod h1:odVSm741yZoC3dpHEUXIqA9tQRhFrgOHwnPIn9lDKlk= +github.com/jackc/pgconn v0.0.0-20190420214824-7e0022ef6ba3/go.mod h1:jkELnwuX+w9qN5YIfX0fl88Ehu4XC3keFuOJJk9pcnA= +github.com/jackc/pgconn v0.0.0-20190824142844-760dd75542eb/go.mod h1:lLjNuW/+OfW9/pnVKPazfWOgNfH2aPem8YQ7ilXGvJE= +github.com/jackc/pgconn v0.0.0-20190831204454-2fabfa3c18b7/go.mod h1:ZJKsE/KZfsUgOEh9hBm+xYTstcNHg7UPMVJqRfQxq4s= +github.com/jackc/pgconn v1.4.0/go.mod h1:Y2O3ZDF0q4mMacyWV3AstPJpeHXWGEetiFttmq5lahk= +github.com/jackc/pgconn v1.5.0/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.5.1-0.20200601181101-fa742c524853/go.mod h1:QeD3lBfpTFe8WUnPZWN5KY/mB8FGMIYRdd8P8Jr0fAI= +github.com/jackc/pgconn v1.6.4/go.mod h1:w2pne1C2tZgP+TvjqLpOigGzNqjBgQW9dUw/4Chex78= +github.com/jackc/pgconn v1.8.0/go.mod h1:1C2Pb36bGIP9QHGBYCjnyhqu7Rv3sGshaQUvmfGIB/o= +github.com/jackc/pgconn v1.9.0/go.mod h1:YctiPyvzfU11JFxoXokUOOKQXQmDMoJL9vJzHH8/2JY= +github.com/jackc/pgconn v1.9.1-0.20210724152538-d89c8390a530/go.mod h1:4z2w8XhRbP1hYxkpTuBjTS3ne3J48K83+u0zoyvg2pI= +github.com/jackc/pgconn v1.12.0 h1:/RvQ24k3TnNdfBSW0ou9EOi5jx2cX7zfE8n2nLKuiP0= +github.com/jackc/pgconn v1.12.0/go.mod h1:ZkhRC59Llhrq3oSfrikvwQ5NaxYExr6twkdkMLaKono= +github.com/jackc/pgio v1.0.0 h1:g12B9UwVnzGhueNavwioyEEpAmqMe1E/BN9ES+8ovkE= +github.com/jackc/pgio v1.0.0/go.mod h1:oP+2QK2wFfUWgr+gxjoBH9KGBb31Eio69xUb0w5bYf8= +github.com/jackc/pgmock v0.0.0-20190831213851-13a1b77aafa2/go.mod h1:fGZlG77KXmcq05nJLRkk0+p82V8B8Dw8KN2/V9c/OAE= +github.com/jackc/pgmock v0.0.0-20201204152224-4fe30f7445fd/go.mod h1:hrBW0Enj2AZTNpt/7Y5rr2xe/9Mn757Wtb2xeBzPv2c= +github.com/jackc/pgmock v0.0.0-20210724152146-4ad1a8207f65/go.mod h1:5R2h2EEX+qri8jOWMbJCtaPWkrrNc7OHwsp2TCqp7ak= +github.com/jackc/pgpassfile v1.0.0 h1:/6Hmqy13Ss2zCq62VdNG8tM1wchn8zjSGOBJ6icpsIM= +github.com/jackc/pgpassfile v1.0.0/go.mod h1:CEx0iS5ambNFdcRtxPj5JhEz+xB6uRky5eyVu/W2HEg= +github.com/jackc/pgproto3 v1.1.0 h1:FYYE4yRw+AgI8wXIinMlNjBbp/UitDJwfj5LqqewP1A= +github.com/jackc/pgproto3 v1.1.0/go.mod h1:eR5FA3leWg7p9aeAqi37XOTgTIbkABlvcPB3E5rlc78= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190420180111-c116219b62db/go.mod h1:bhq50y+xrl9n5mRYyCBFKkpRVTLYJVWeCc+mEAI3yXA= +github.com/jackc/pgproto3/v2 v2.0.0-alpha1.0.20190609003834-432c2951c711/go.mod h1:uH0AWtUmuShn0bcesswc4aBTWGvw0cAxIJp+6OB//Wg= +github.com/jackc/pgproto3/v2 v2.0.0-rc3/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.0-rc3.0.20190831210041-4c03ce451f29/go.mod h1:ryONWYqW6dqSg1Lw6vXNMXoBJhpzvWKnT95C46ckYeM= +github.com/jackc/pgproto3/v2 v2.0.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.2/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.0.6/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.1.1/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgproto3/v2 v2.3.0 h1:brH0pCGBDkBW07HWlN/oSBXrmo3WB0UvZd1pIuDcL8Y= +github.com/jackc/pgproto3/v2 v2.3.0/go.mod h1:WfJCnwN3HIg9Ish/j3sgWXnAfK8A9Y0bwXYU5xKaEdA= +github.com/jackc/pgservicefile v0.0.0-20200307190119-3430c5407db8/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20200714003250-2b9c44734f2b/go.mod h1:vsD4gTJCa9TptPL8sPkXrLZ+hDuNrZCnj29CQpr4X1E= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761 h1:iCEnooe7UlwOQYpKFhBabPMi4aNAfoODPEFNiAnClxo= +github.com/jackc/pgservicefile v0.0.0-20240606120523-5a60cdf6a761/go.mod h1:5TJZWKEWniPve33vlWYSoGYefn3gLQRzjfDlhSJ9ZKM= +github.com/jackc/pgtype v0.0.0-20190421001408-4ed0de4755e0/go.mod h1:hdSHsc1V01CGwFsrv11mJRHWJ6aifDLfdV3aVjFF0zg= +github.com/jackc/pgtype v0.0.0-20190824184912-ab885b375b90/go.mod h1:KcahbBH1nCMSo2DXpzsoWOAfFkdEtEJpPbVLq8eE+mc= +github.com/jackc/pgtype v0.0.0-20190828014616-a8802b16cc59/go.mod h1:MWlu30kVJrUS8lot6TQqcg7mtthZ9T0EoIBFiJcmcyw= +github.com/jackc/pgtype v1.2.0/go.mod h1:5m2OfMh1wTK7x+Fk952IDmI4nw3nPrvtQdM0ZT4WpC0= +github.com/jackc/pgtype v1.3.1-0.20200510190516-8cd94a14c75a/go.mod h1:vaogEUkALtxZMCH411K+tKzNpwzCKU+AnPzBKZ+I+Po= +github.com/jackc/pgtype v1.3.1-0.20200606141011-f6355165a91c/go.mod h1:cvk9Bgu/VzJ9/lxTO5R5sf80p0DiucVtN7ZxvaC4GmQ= +github.com/jackc/pgtype v1.4.2/go.mod h1:JCULISAZBFGrHaOXIIFiyfzW5VY0GRitRr8NeJsrdig= +github.com/jackc/pgtype v1.8.1-0.20210724151600-32e20a603178/go.mod h1:C516IlIV9NKqfsMCXTdChteoXmwgUceqaLfjg2e3NlM= +github.com/jackc/pgtype v1.11.0 h1:u4uiGPz/1hryuXzyaBhSk6dnIyyG2683olG2OV+UUgs= +github.com/jackc/pgtype v1.11.0/go.mod h1:LUMuVrfsFfdKGLw+AFFVv6KtHOFMwRgDDzBt76IqCA4= +github.com/jackc/pgx/v4 v4.0.0-20190420224344-cc3461e65d96/go.mod h1:mdxmSJJuR08CZQyj1PVQBHy9XOp5p8/SHH6a0psbY9Y= +github.com/jackc/pgx/v4 v4.0.0-20190421002000-1b8f0016e912/go.mod h1:no/Y67Jkk/9WuGR0JG/JseM9irFbnEPbuWV2EELPNuM= +github.com/jackc/pgx/v4 v4.0.0-pre1.0.20190824185557-6972a5742186/go.mod h1:X+GQnOEnf1dqHGpw7JmHqHc1NxDoalibchSk9/RWuDc= +github.com/jackc/pgx/v4 v4.5.0/go.mod h1:EpAKPLdnTorwmPUUsqrPxy5fphV18j9q3wrfRXgo+kA= +github.com/jackc/pgx/v4 v4.6.1-0.20200510190926-94ba730bb1e9/go.mod h1:t3/cdRQl6fOLDxqtlyhe9UWgfIi9R8+8v8GKV5TRA/o= +github.com/jackc/pgx/v4 v4.6.1-0.20200606145419-4e5062306904/go.mod h1:ZDaNWkt9sW1JMiNn0kdYBaLelIhw7Pg4qd+Vk6tw7Hg= +github.com/jackc/pgx/v4 v4.8.1/go.mod h1:4HOLxrl8wToZJReD04/yB20GDwf4KBYETvlHciCnwW0= +github.com/jackc/pgx/v4 v4.12.1-0.20210724153913-640aa07df17c/go.mod h1:1QD0+tgSXP7iUjYm9C1NxKhny7lq6ee99u/z+IHFcgs= +github.com/jackc/pgx/v4 v4.16.0 h1:4k1tROTJctHotannFYzu77dY3bgtMRymQP7tXQjqpPk= +github.com/jackc/pgx/v4 v4.16.0/go.mod h1:N0A9sFdWzkw/Jy1lwoiB64F2+ugFZi987zRxcPez/wI= +github.com/jackc/pgx/v5 v5.6.0 h1:SWJzexBzPL5jb0GEsrPMLIsi/3jOo7RHlzTjcAeDrPY= +github.com/jackc/pgx/v5 v5.6.0/go.mod h1:DNZ/vlrUnhWCoFGxHAG8U2ljioxukquj7utPDgtQdTw= +github.com/jackc/puddle v0.0.0-20190413234325-e4ced69a3a2b/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v0.0.0-20190608224051-11cab39313c9/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle v1.2.1/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= +github.com/jackc/puddle/v2 v2.2.2 h1:PR8nw+E/1w0GLuRFSmiioY6UooMp6KJv0/61nB7icHo= +github.com/jackc/puddle/v2 v2.2.2/go.mod h1:vriiEXHvEE654aYKXXjOvZM39qJ0q+azkZFrfEOc3H4= +github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= +github.com/jinzhu/gorm v1.9.8/go.mod h1:bdqTT3q6dhSph2K3pWxrHP6nqxuAp2yQ3KFtc3U3F84= +github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/inflection v1.0.0 h1:K317FqzuhWc8YvSVlFMCCUb36O/S9MCKRDI7QkRKD/E= +github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc= +github.com/jinzhu/now v1.0.0/go.mod h1:oHTiXerJ20+SfYcrdlBO7rzZRJWGwSTQ0iUY2jI6Gfc= +github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ= +github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8= +github.com/joho/godotenv v1.3.0 h1:Zjp+RcGpHhGlrMbJzXTrZZPrWj+1vfm90La1wgB6Bhc= +github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= +github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= +github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= +github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= +github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo= +github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE= +github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= +github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.4/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= +github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0 h1:SOEGU9fKiNWd/HOJuq6+3iTQz8KNCLtVX6idSoTLdUw= +github.com/lann/builder v0.0.0-20180802200727-47ae307949d0/go.mod h1:dXGbAdH5GtBTC4WfIxhKZfyBF/HBFgRZSWwZ9g/He9o= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0 h1:P6pPBnrTSX3DEVR4fDembhRWSsG5rVo6hYhAB/ADZrk= +github.com/lann/ps v0.0.0-20150810152359-62de8c46ede0/go.mod h1:vmVJ0l/dxyfGW6FmdpVm2joNMFikkuWg0EoCKLGUMNw= +github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.3.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= +github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.5/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4= +github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/magiconair/properties v1.8.10 h1:s31yESBquKXCV9a/ScB3ESkOjUYYv+X0rg8SYxI99mE= +github.com/magiconair/properties v1.8.10/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= +github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= +github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= +github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= +github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= +github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= +github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= +github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= +github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= +github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103 h1:Z/i1e+gTZrmcGeZyWckaLfucYG6KYOXLWo4co8pZYNY= +github.com/mendsley/gojwk v0.0.0-20141217222730-4d5ec6e58103/go.mod h1:o9YPB5aGP8ob35Vy6+vyq3P3bWe7NQWzf+JLiXCiMaE= +github.com/microcosm-cc/bluemonday v1.0.18/go.mod h1:Z0r70sCuXHig8YpBzCc5eGHAap2K7e/u082ZUpDRRqM= +github.com/microcosm-cc/bluemonday v1.0.23 h1:SMZe2IGa0NuHvnVNAZ+6B38gsTbi5e4sViiWJyDDqFY= +github.com/microcosm-cc/bluemonday v1.0.23/go.mod h1:mN70sk7UkkF8TUr2IGBpNN0jAgStuPzlK76QuruE/z4= +github.com/microsoft/go-mssqldb v1.7.2 h1:CHkFJiObW7ItKTJfHo1QX7QBBD1iV+mn1eOyRP3b/PA= +github.com/microsoft/go-mssqldb v1.7.2/go.mod h1:kOvZKUdrhhFQmxLZqbwUV0rHkNkZpthMITIb2Ko1IoA= +github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= +github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= +github.com/moby/go-archive v0.1.0 h1:Kk/5rdW/g+H8NHdJW2gsXyZ7UnzvJNOy6VKJqueWdcQ= +github.com/moby/go-archive v0.1.0/go.mod h1:G9B+YoujNohJmrIYFBpSd54GTUB4lt9S+xVQvsJyFuo= +github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= +github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc= +github.com/moby/sys/atomicwriter v0.1.0 h1:kw5D/EqkBwsBFi0ss9v1VG3wIkVhzGvLklJ+w3A14Sw= +github.com/moby/sys/atomicwriter v0.1.0/go.mod h1:Ul8oqv2ZMNHOceF643P6FKPXeCmYtlQMvpizfsSoaWs= +github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7zgvCU= +github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= +github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs= +github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= +github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= +github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= +github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0= +github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= +github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= +github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A= +github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= +github.com/onsi/ginkgo/v2 v2.1.3/go.mod h1:vw5CSIxN1JObi/U8gcbwft7ZxR2dgaR70JSE3/PpL4c= +github.com/onsi/ginkgo/v2 v2.1.4/go.mod h1:um6tUpWM/cxCK3/FK8BXqEiUMUwRgSM4JXG47RKZmLU= +github.com/onsi/ginkgo/v2 v2.8.1 h1:xFTEVwOFa1D/Ty24Ws1npBWkDYEV9BqZrsDxVrVkrrU= +github.com/onsi/ginkgo/v2 v2.8.1/go.mod h1:N1/NbDngAFcSLdyZ+/aYTYGSlq9qMCS/cNKGJjy+csc= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= +github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.17.0/go.mod h1:HnhC7FXeEQY45zxNK3PPoIUhzk/80Xly9PcubAlGdZY= +github.com/onsi/gomega v1.19.0/go.mod h1:LY+I3pBVzYsTBU1AnDwOSxaYi9WoWiqgwooUqq9yPro= +github.com/onsi/gomega v1.27.1 h1:rfztXRbg6nv/5f+Raen9RcGoSecHIFgBBLQK3Wdj754= +github.com/onsi/gomega v1.27.1/go.mod h1:aHX5xOykVYzWOV4WqQy0sy8BQptgukenXpCXfadcIAw= +github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= +github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= +github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040= +github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M= +github.com/openshift-online/ocm-sdk-go v0.1.334 h1:45WSkXEsmpGekMa9kO6NpEG8PW5/gfmMekr7kL+1KvQ= +github.com/openshift-online/ocm-sdk-go v0.1.334/go.mod h1:KYOw8kAKAHyPrJcQoVR82CneQ4ofC02Na4cXXaTq4Nw= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw= +github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= +github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= +github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= +github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8= +github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= +github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= +github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= +github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= +github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg= +github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= +github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= +github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= +github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= +github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= +github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= +github.com/segmentio/ksuid v1.0.2 h1:9yBfKyw4ECGTdALaF09Snw3sLJmYIX6AbPJrAy6MrDc= +github.com/segmentio/ksuid v1.0.2/go.mod h1:BXuJDr2byAiHuQaQtSKoXh1J0YmUDurywOXgB2w+OSU= +github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4= +github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM= +github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= +github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= +github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= +github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= +github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= +github.com/shopspring/decimal v0.0.0-20200227202807-02e2044944cc/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= +github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= +github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ= +github.com/smarty/assertions v1.15.0 h1:cR//PqUBUiQRakZWqBiFFQ9wb8emQGDb0HeGdqGByCY= +github.com/smarty/assertions v1.15.0/go.mod h1:yABtdzeQs6l1brC900WlRNwj6ZR55d7B+E8C6HtKdec= +github.com/smartystreets/goconvey v1.8.1 h1:qGjIddxOk4grTu9JPOU31tVfq3cNdBlNa5sSznIX1xY= +github.com/smartystreets/goconvey v1.8.1/go.mod h1:+/u4qLyY6x1jReYOp7GOM2FSt8aP9CzCZL03bI28W60= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cobra v0.0.5 h1:f0B+LkLX6DtmRH1isoNA9VTtNUK9K8xYd28JNNfOv/s= +github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= +github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= +github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= +github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= +github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= +github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= +github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= +github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U= +github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U= +github.com/testcontainers/testcontainers-go v0.33.0 h1:zJS9PfXYT5O0ZFXM2xxXfk4J5UMw/kRiISng037Gxdw= +github.com/testcontainers/testcontainers-go v0.33.0/go.mod h1:W80YpTa8D5C3Yy16icheD01UTDu+LmXIA2Keo+jWtT8= +github.com/testcontainers/testcontainers-go/modules/postgres v0.33.0 h1:c+Gt+XLJjqFAejgX4hSpnHIpC9eAhvgI/TFWL/PbrFI= +github.com/testcontainers/testcontainers-go/modules/postgres v0.33.0/go.mod h1:I4DazHBoWDyf69ByOIyt3OdNjefiUx372459txOpQ3o= +github.com/tidwall/pretty v0.0.0-20190325153808-1166b9ac2b65/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= +github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/urfave/negroni v1.0.0 h1:kIimOitoypq34K7TG7DUaJ9kq/N4Ofuwi1sjz0KipXc= +github.com/urfave/negroni v1.0.0/go.mod h1:Meg73S6kFm/4PpbYdq35yYWoCZ9mS/YSx+lKnmiohz4= +github.com/xdg/scram v0.0.0-20180814205039-7eeb5667e42c/go.mod h1:lB8K/P019DLNhemzwFU4jHLhdvlE6uDZjXFejJXr49I= +github.com/xdg/stringprep v1.0.0/go.mod h1:Jhud4/sHMO4oL310DaZAKk9ZaJ08SJfe+sJh0HrGL1Y= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +github.com/yaacov/tree-search-language v0.0.0-20190923184055-1c2dad2e354b h1:aWR0+NlUGQpFPxpjcYW7oXsN1GnYUVIdB5Act7I6jzc= +github.com/yaacov/tree-search-language v0.0.0-20190923184055-1c2dad2e354b/go.mod h1:uXZEzDS1siuQsBuHL1A4gy27xIsnnL06MhqrwvySsIk= +github.com/yuin/goldmark v1.1.25/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= +github.com/yuin/goldmark v1.4.1/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= +github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/yusufpapurcu/wmi v1.2.4 h1:zFUKzehAFReQwLys1b/iSMl+JQGSCSjtVqQn9bBrPo0= +github.com/yusufpapurcu/wmi v1.2.4/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0= +github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= +go.mongodb.org/mongo-driver v1.0.2/go.mod h1:u7ryQJ+DOzQmeO7zB6MHyr8jkEQvC8vH7qLUO4lqsUM= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= +go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= +go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= +go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= +go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= +go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.38.0 h1:RkfdswUDRimDg0m2Az18RKOsnI8UDzppJAtj01/Ymk8= +go.opentelemetry.io/otel v1.38.0/go.mod h1:zcmtmQ1+YmQM9wrNsTGV/q/uyusom3P8RxwExxkZhjM= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 h1:GqRJVj7UmLjCVyVJ3ZFLdPRmhDUp2zFmQe3RHIOsw24= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0/go.mod h1:ri3aaHSmCTVYu2AWv44YMauwAQc0aqI9gHKIcSbI1pU= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0 h1:aTL7F04bJHUlztTsNGJ2l+6he8c+y/b//eR0jjjemT4= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.38.0/go.mod h1:kldtb7jDTeol0l3ewcmd8SDvx3EmIE7lyvqbasU3QC4= +go.opentelemetry.io/otel/metric v1.38.0 h1:Kl6lzIYGAh5M159u9NgiRkmoMKjvbsKtYRwgfrA6WpA= +go.opentelemetry.io/otel/metric v1.38.0/go.mod h1:kB5n/QoRM8YwmUahxvI3bO34eVtQf2i4utNVLr9gEmI= +go.opentelemetry.io/otel/sdk v1.38.0 h1:l48sr5YbNf2hpCUj/FoGhW9yDkl+Ma+LrVl8qaM5b+E= +go.opentelemetry.io/otel/sdk v1.38.0/go.mod h1:ghmNdGlVemJI3+ZB5iDEuk4bWA3GkTpW+DOoZMYBVVg= +go.opentelemetry.io/otel/trace v1.38.0 h1:Fxk5bKrDZJUH+AMyyIXGcFAPah0oRcT+LuNtJrmcNLE= +go.opentelemetry.io/otel/trace v1.38.0/go.mod h1:j1P9ivuFsTceSWe1oY+EeW3sc+Pp42sO++GHkg4wwhs= +go.opentelemetry.io/proto/otlp v1.7.1 h1:gTOMpGDb0WTBOP8JaO72iL3auEZhVmAQg4ipjOVAtj4= +go.opentelemetry.io/proto/otlp v1.7.1/go.mod h1:b2rVh6rfI/s2pHWNlB7ILJcRALpcNDzKhACevjI+ZnE= +go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= +go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= +go.uber.org/multierr v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= +go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190530122614-20be4c3c3ed5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20190911031432-227b76d455e7/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= +golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20220427172511-eb4f295cb31f/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= +golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4= +golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= +golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= +golang.org/x/exp v0.0.0-20190829153037-c13cbed26979/go.mod h1:86+5VVa7VpoJ4kLfm080zCjGlMRFzhUhsZKEZO7MGek= +golang.org/x/exp v0.0.0-20191030013958-a1ab85dbe136/go.mod h1:JXzH8nQsPlswgeRAPE3MuO9GYsAcnJvJ4vnMwN/5qkY= +golang.org/x/exp v0.0.0-20191129062945-2f5052295587/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= +golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= +golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= +golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190523035834-f03afa92d3ff/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= +golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190409202823-959b441ac422/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190909230951-414d861bb4ac/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRuDixDT3tpyyb+LUpUlRWLxfhWrs= +golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY= +golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE= +golang.org/x/mobile v0.0.0-20190509164839-32b2708ab171/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/mod v0.1.0/go.mod h1:0QHyrYULN0/3qlju5TqG8bIK38QM8yzMo5ekMj3DlcY= +golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg= +golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= +golang.org/x/mod v0.6.0-dev.0.20220106191415-9b9b3d81d5e3/go.mod h1:3p9vT2HGsQu2K1YbXdKPJLVgG5VJdoTa1poYQBtP1AY= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520004742-59133d7f0dd7/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200520182314-0ba52f642ac2/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/net v0.0.0-20200625001655-4c5254603344/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200707034311-ab3426394381/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81Ro3o1hOxt32SMVPicZroKQ2sZA= +golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= +golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210614182718-04defd469f4e/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211015210444-4f30a5c0130f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= +golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE= +golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190523182746-aaccbc9213b0/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= +golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.17.0 h1:l60nONMj9l5drqw6jlhIELNv9I0A4OFgRsG9k2oT9Ug= +golang.org/x/sync v0.17.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190813064441-fde4db37ae7a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190826190057-c7b8b68b1456/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200212091648-12a6c2dcc1e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211019181941-9d821ace8654/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220319134239-a9b59b0215f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.36.0 h1:KVRy2GtZBrk1cBYA7MKu5bEZFxQk4NIDV6RLVcC8o0k= +golang.org/x/sys v0.36.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks= +golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= +golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= +golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= +golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4= +golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw= +golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= +golang.org/x/text v0.29.0 h1:1neNs90w9YzJ9BocxfsQNHKuAT4pkghyXc4nhZ6sJvk= +golang.org/x/text v0.29.0/go.mod h1:7MhJOA9CD2qZyOKYazxdYMF85OwPdEr9jTtBpO7ydH4= +golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190521203540-521d6ed310dd/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= +golang.org/x/tools v0.0.0-20190601110225-0abef6e9ecb8/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191216173652-a0e659d51361/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20191227053925-7b8e75db28f4/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200103221440-774c71fcf114/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200117161641-43d50277825c/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200122220014-bf1340f18c4a/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200130002326-2f3ba24bd6e7/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200204074204-1cc6d1ef6c74/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200207183749-b753a1ba74fa/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200212150539-ea181f53ac56/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200224181240-023911ca70b2/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapKDTa4BR/hXlZSLoq2Wpct/0txZ28= +golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= +golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= +golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= +golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= +golang.org/x/tools v0.1.10/go.mod h1:Uh6Zz+xoGYZom868N8YTex3t7RhtHDBrE8Gzo9bV56E= +golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= +google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= +google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= +google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= +google.golang.org/api v0.17.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.18.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.19.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.20.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.22.0/go.mod h1:BwFmGc8tA3vsd7r/7kR8DY7iEEGSU04BFxCo5jP/sfE= +google.golang.org/api v0.24.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.28.0/go.mod h1:lIXQywCXRcnZPGlsd8NbLnOjtAoL6em04bJ9+z0MncE= +google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSrHWM= +google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= +google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= +google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= +google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= +google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20191230161307-f3c370f40bfb/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200115191322-ca5a22157cba/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200122232147-0452cf42e150/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= +google.golang.org/genproto v0.0.0-20200204135345-fa8e72b47b90/go.mod h1:GmwEX6Z4W5gMy59cAlVYjN9JhxgbQH6Gn+gFDQe2lzA= +google.golang.org/genproto v0.0.0-20200212174721-66ed5ce911ce/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= +google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= +google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= +google.golang.org/genproto/googleapis/api v0.0.0-20251014184007-4626949a642f h1:OiFuztEyBivVKDvguQJYWq1yDcfAHIID/FVrPR4oiI0= +google.golang.org/genproto/googleapis/api v0.0.0-20251014184007-4626949a642f/go.mod h1:kprOiu9Tr0JYyD6DORrc4Hfyk3RFXqkQ3ctHEum3ZbM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251014184007-4626949a642f h1:1FTH6cpXFsENbPR5Bu8NQddPSaUUE6NA2XdZdDSAJK4= +google.golang.org/genproto/googleapis/rpc v0.0.0-20251014184007-4626949a642f/go.mod h1:7i2o+ce6H/6BluujYR+kqX3GKH+dChPTQU19wjRPiGk= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +google.golang.org/grpc v1.21.0/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= +google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= +google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= +google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= +google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= +google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= +google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.75.1 h1:/ODCNEuf9VghjgO3rqLcfg8fiOP0nSluljWFlDxELLI= +google.golang.org/grpc v1.75.1/go.mod h1:JtPAzKiq4v1xcAB2hydNlWI2RnF85XXcV0mhKXr2ecQ= +google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= +google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= +google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= +google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= +google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= +google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= +google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= +google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= +google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= +google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= +google.golang.org/protobuf v1.36.10 h1:AYd7cD/uASjIL6Q9LiTjz8JLcrh/88q5UObnmY3aOOE= +google.golang.org/protobuf v1.36.10/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= +gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= +gopkg.in/resty.v1 v1.12.0 h1:CuXP0Pjfw9rOuY6EP+UvtNvt5DSqHpIxILZKT/quCZI= +gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= +gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= +gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= +gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gorm.io/datatypes v1.2.7 h1:ww9GAhF1aGXZY3EB3cJPJ7//JiuQo7DlQA7NNlVaTdk= +gorm.io/datatypes v1.2.7/go.mod h1:M2iO+6S3hhi4nAyYe444Pcb0dcIiOMJ7QHaUXxyiNZY= +gorm.io/driver/mysql v1.0.1/go.mod h1:KtqSthtg55lFp3S5kUXqlGaelnWpKitn4k1xZTnoiPw= +gorm.io/driver/mysql v1.5.6 h1:Ld4mkIickM+EliaQZQx3uOJDJHtrd70MxAUqWqlx3Y8= +gorm.io/driver/mysql v1.5.6/go.mod h1:sEtPWMiqiN1N1cMXoXmBbd8C6/l+TESwriotuRRpkDM= +gorm.io/driver/postgres v1.0.0/go.mod h1:wtMFcOzmuA5QigNsgEIb7O5lhvH1tHAF1RbWmLWV4to= +gorm.io/driver/postgres v1.6.0 h1:2dxzU8xJ+ivvqTRph34QX+WrRaJlmfyPqXmoGVjMBa4= +gorm.io/driver/postgres v1.6.0/go.mod h1:vUw0mrGgrTK+uPHEhAdV4sfFELrByKVGnaVRkXDhtWo= +gorm.io/driver/sqlite v1.1.1/go.mod h1:hm2olEcl8Tmsc6eZyxYSeznnsDaMqamBvEXLNtBg4cI= +gorm.io/driver/sqlite v1.4.3 h1:HBBcZSDnWi5BW3B3rwvVTc510KGkBkexlOg0QrmLUuU= +gorm.io/driver/sqlite v1.4.3/go.mod h1:0Aq3iPO+v9ZKbcdiz8gLWRw5VOPcBOPUQJFLq5e2ecI= +gorm.io/driver/sqlserver v1.0.2/go.mod h1:gb0Y9QePGgqjzrVyTQUZeh9zkd5v0iz71cM1B4ZycEY= +gorm.io/driver/sqlserver v1.6.0 h1:VZOBQVsVhkHU/NzNhRJKoANt5pZGQAS1Bwc6m6dgfnc= +gorm.io/driver/sqlserver v1.6.0/go.mod h1:WQzt4IJo/WHKnckU9jXBLMJIVNMVeTu25dnOzehntWw= +gorm.io/gorm v1.9.19/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= +gorm.io/gorm v1.20.0/go.mod h1:0HFTzE/SqkGTzK6TlDPPQbAYCluiVvhzoA1+aVyzenw= +gorm.io/gorm v1.25.7/go.mod h1:hbnx/Oo0ChWMn1BIhpy1oYozzpM15i4YPuHDmfYtwg8= +gorm.io/gorm v1.30.0 h1:qbT5aPv1UH8gI99OsRlvDToLxW5zR7FzS9acZDOZcgs= +gorm.io/gorm v1.30.0/go.mod h1:8Z33v652h4//uMA76KjeDH8mJXPm1QNCYrMeatR0DOE= +gotest.tools/v3 v3.5.2 h1:7koQfIKdy+I8UTetycgUqXWSDwpgv193Ka+qRsmBY8Q= +gotest.tools/v3 v3.5.2/go.mod h1:LtdLGcnqToBH83WByAAi/wiwSFCArdFIUV/xxN4pcjA= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190531162725-42df64e2171a/go.mod h1:wtc9q0E9zm8PjdRMh29DPlTlCCHVzKDwnkT4GskQVzg= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= +rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= +rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= +rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= diff --git a/img.png b/img.png new file mode 100755 index 0000000..9217bd7 Binary files /dev/null and b/img.png differ diff --git a/openapi/openapi.yaml b/openapi/openapi.yaml new file mode 100644 index 0000000..5827187 --- /dev/null +++ b/openapi/openapi.yaml @@ -0,0 +1,1075 @@ +openapi: 3.0.0 +info: + title: HyperFleet API + version: 1.0.0 + contact: + name: HyperFleet Team + description: |- + HyperFleet API provides simple CRUD operations for managing cluster resources and their status history. + + **Architecture**: Simple CRUD only, no business logic, no event creation. + Sentinel operator handles all orchestration logic. + Adapters handle the specifics of managing spec +tags: [] +paths: + /api/hyperfleet/v1/clusters: + get: + operationId: getClusters + summary: List clusters + parameters: + - $ref: '#/components/parameters/QueryParams.page' + - $ref: '#/components/parameters/QueryParams.pageSize' + - $ref: '#/components/parameters/QueryParams.orderBy' + - $ref: '#/components/parameters/QueryParams.order' + - $ref: '#/components/parameters/SearchParams' + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/ClusterList' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + post: + operationId: postCluster + summary: Create cluster + description: |- + Create a new cluster resource. + + **Note**: The `status` object in the response is read-only and computed by the service. + It is NOT part of the request body. Initially, status.phase will be "NotReady" and + status.adapters will be empty until adapters POST their status. + parameters: [] + responses: + '201': + description: The request has succeeded and a new resource has been created as a result. + content: + application/json: + schema: + $ref: '#/components/schemas/Cluster' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/ClusterCreateRequest' + /api/hyperfleet/v1/clusters/{cluster_id}: + get: + operationId: getClusterById + summary: Get cluster by ID + parameters: + - $ref: '#/components/parameters/SearchParams' + - name: cluster_id + in: path + required: true + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/Cluster' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /api/hyperfleet/v1/clusters/{cluster_id}/nodepools: + get: + operationId: getNodePoolsByClusterId + summary: List all nodepools for cluster + description: Returns the list of all nodepools for a cluster + parameters: + - name: cluster_id + in: path + required: true + description: Cluster ID + schema: + type: string + - $ref: '#/components/parameters/QueryParams.page' + - $ref: '#/components/parameters/QueryParams.pageSize' + - $ref: '#/components/parameters/QueryParams.orderBy' + - $ref: '#/components/parameters/QueryParams.order' + - $ref: '#/components/parameters/SearchParams' + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/NodePoolList' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + post: + operationId: createNodePool + summary: Create nodepool + description: Create a NodePool for a cluster + parameters: + - name: cluster_id + in: path + required: true + description: Cluster ID + schema: + type: string + responses: + '201': + description: The request has succeeded and a new resource has been created as a result. + content: + application/json: + schema: + $ref: '#/components/schemas/NodePoolCreateResponse' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/NodePoolCreateRequest' + /api/hyperfleet/v1/clusters/{cluster_id}/nodepools/{nodepool_id}: + get: + operationId: getNodePoolById + summary: Get nodepool by ID + description: Returns specific nodepool + parameters: + - name: cluster_id + in: path + required: true + description: Cluster ID + schema: + type: string + - name: nodepool_id + in: path + required: true + description: NodePool ID + schema: + type: string + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/NodePool' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + /api/hyperfleet/v1/clusters/{cluster_id}/nodepools/{nodepool_id}/statuses: + get: + operationId: getNodePoolsStatuses + summary: List all adapter statuses for nodepools + description: Returns current status object for each adapter that has reported status + parameters: + - name: cluster_id + in: path + required: true + description: Cluster ID + schema: + type: string + - name: nodepool_id + in: path + required: true + schema: + type: string + - $ref: '#/components/parameters/QueryParams.page' + - $ref: '#/components/parameters/QueryParams.pageSize' + - $ref: '#/components/parameters/QueryParams.orderBy' + - $ref: '#/components/parameters/QueryParams.order' + - $ref: '#/components/parameters/SearchParams' + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/AdapterStatusList' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' + post: + operationId: postNodePoolStatuses + summary: Create adapter status + description: |- + Adapter creates its initial status object for this cluster. + Returns 409 if adapter already has a status object for this cluster + + Response includes the status id and href for future operations. + Adapter should store the returned id/href to update its status later. + parameters: + - name: cluster_id + in: path + required: true + description: Cluster ID + schema: + type: string + - name: nodepool_id + in: path + required: true + schema: + type: string + responses: + '201': + description: The request has succeeded and a new resource has been created as a result. + content: + application/json: + schema: + $ref: '#/components/schemas/AdapterStatus' + '400': + description: The server could not understand the request due to invalid syntax. + '404': + description: The server cannot find the requested resource. + '409': + description: The request conflicts with the current state of the server. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/AdapterStatusCreateRequest' + /api/hyperfleet/v1/clusters/{cluster_id}/statuses: + get: + operationId: getClusterStatuses + summary: List all adapter statuses for cluster + description: Returns current status object for each adapter that has reported status + parameters: + - name: cluster_id + in: path + required: true + description: Cluster ID + schema: + type: string + - $ref: '#/components/parameters/QueryParams.page' + - $ref: '#/components/parameters/QueryParams.pageSize' + - $ref: '#/components/parameters/QueryParams.orderBy' + - $ref: '#/components/parameters/QueryParams.order' + - $ref: '#/components/parameters/SearchParams' + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/AdapterStatusList' + '404': + description: The server cannot find the requested resource. + post: + operationId: postClusterStatuses + summary: Create adapter status + description: |- + Adapter creates its initial status object for this cluster. + Returns 409 if adapter already has a status object for this cluster + + Response includes the status id and href for future operations. + Adapter should store the returned id/href to update its status later. + parameters: + - name: cluster_id + in: path + required: true + description: Cluster ID + schema: + type: string + responses: + '201': + description: The request has succeeded and a new resource has been created as a result. + content: + application/json: + schema: + $ref: '#/components/schemas/AdapterStatus' + '400': + description: The server could not understand the request due to invalid syntax. + '404': + description: The server cannot find the requested resource. + '409': + description: The request conflicts with the current state of the server. + requestBody: + required: true + content: + application/json: + schema: + $ref: '#/components/schemas/AdapterStatusCreateRequest' + /api/hyperfleet/v1/compatibility: + get: + operationId: getCompatibility + description: Returns the list of all nodepools + parameters: + - $ref: '#/components/parameters/QueryParams.page' + - $ref: '#/components/parameters/QueryParams.pageSize' + - $ref: '#/components/parameters/QueryParams.orderBy' + - $ref: '#/components/parameters/QueryParams.order' + - $ref: '#/components/parameters/SearchParams' + responses: + '200': + description: The request has succeeded. + content: + text/plain: + schema: + type: string + security: + - BearerAuth: [] + /api/hyperfleet/v1/nodepools: + get: + operationId: getNodePools + summary: List all nodepools for cluster + description: Returns the list of all nodepools + parameters: + - $ref: '#/components/parameters/QueryParams.page' + - $ref: '#/components/parameters/QueryParams.pageSize' + - $ref: '#/components/parameters/QueryParams.orderBy' + - $ref: '#/components/parameters/QueryParams.order' + - $ref: '#/components/parameters/SearchParams' + responses: + '200': + description: The request has succeeded. + content: + application/json: + schema: + $ref: '#/components/schemas/NodePoolList' + default: + description: An unexpected error response. + content: + application/json: + schema: + $ref: '#/components/schemas/Error' +components: + parameters: + QueryParams.order: + name: order + in: query + required: false + schema: + $ref: '#/components/schemas/OrderDirection' + default: desc + explode: false + QueryParams.orderBy: + name: orderBy + in: query + required: false + schema: + type: string + default: created_at + explode: false + QueryParams.page: + name: page + in: query + required: false + schema: + type: integer + format: int32 + default: 1 + explode: false + QueryParams.pageSize: + name: pageSize + in: query + required: false + schema: + type: integer + format: int32 + default: 20 + explode: false + SearchParams: + name: search + in: query + required: false + schema: + type: string + explode: false + schemas: + APIResource: + type: object + properties: + labels: + type: object + additionalProperties: + type: string + description: labels for the API resource as pairs of name:value strings + allOf: + - $ref: '#/components/schemas/ObjectReference' + AdapterStatus: + type: object + required: + - adapter + - observed_generation + - conditions + properties: + adapter: + type: string + description: Name of the adapter that generated this status (Validator, DNS...) + observed_generation: + type: integer + format: int32 + description: Which generation for an entity (Clusters, NodePools) was current at the time of creating this status + conditions: + type: array + items: + $ref: '#/components/schemas/Condition' + description: Kubernetes-style conditions tracking adapter state + data: + type: object + additionalProperties: {} + description: Adapter-specific data (structure varies by adapter type) + metadata: + type: object + properties: + job_name: + type: string + job_namespace: + type: string + attempt: + type: integer + format: int32 + started_at: + type: string + completed_at: + type: string + duration: + type: string + description: Metadata about the adapter job/execution + example: + adapter: validator + observed_generation: 1 + conditions: + - adapter: validator + type: Available + status: 'True' + reason: All validations passed + message: All 30 validation tests passed + observed_generation: 1 + created_at: '2021-01-01T00:00:00Z' + updated_at: '2021-01-01T00:00:00Z' + - adapter: validator + type: Applied + status: 'True' + reason: Validation job applied + message: Validation job applied successfully + observed_generation: 1 + created_at: '2021-01-01T00:00:00Z' + updated_at: '2021-01-01T00:00:00Z' + - adapter: validator + type: Health + status: 'True' + reason: Validation job healthy + message: Validation job is healthy + observed_generation: 1 + created_at: '2021-01-01T00:00:00Z' + updated_at: '2021-01-01T00:00:00Z' + data: + providerProperty1: providerValue1 + providerProperty2: providerValue2 + metadata: + job_name: validator-job + job_namespace: default + attempt: 1 + started_at: '2021-01-01T00:00:00Z' + completed_at: '2021-01-01T00:00:00Z' + duration: 10s + AdapterStatusCreateRequest: + type: object + required: + - adapter + - observed_generation + - conditions + properties: + adapter: + type: string + description: Adapter identifier + observed_generation: + type: integer + format: int32 + description: Which cluster generation this status reflects + conditions: + type: array + items: + $ref: '#/components/schemas/Condition' + data: + type: object + additionalProperties: {} + description: Adapter-specific data + metadata: + type: object + additionalProperties: {} + description: Job execution metadata + AdapterStatusList: + type: object + required: + - items + properties: + items: + type: array + items: + $ref: '#/components/schemas/AdapterStatus' + allOf: + - $ref: '#/components/schemas/List' + Cluster: + type: object + required: + - created_at + - updated_at + - created_by + - updated_by + - generation + - status + properties: + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + created_by: + type: string + format: email + updated_by: + type: string + format: email + generation: + type: integer + format: int32 + minimum: 1 + description: Generation field is updated on customer updates, reflecting the version of the "intent" of the customer + status: + $ref: '#/components/schemas/ClusterStatus' + allOf: + - $ref: '#/components/schemas/ClusterBase' + example: + kind: Cluster + id: cluster-123 + href: https://api.hyperfleet.com/v1/clusters/cluster-123 + name: cluster-123 + labels: + environment: production + team: platform + spec: {} + created_at: '2021-01-01T00:00:00Z' + updated_at: '2021-01-01T00:00:00Z' + generation: 1 + status: + phase: Ready + last_transition_time: '2021-01-01T00:00:00Z' + observed_generation: 1 + updated_at: '2021-01-01T00:00:00Z' + adapters: + - adapter: validator + type: Available + status: 'True' + reason: All validations passed + message: All 30 validation tests passed + observed_generation: 1 + created_at: '2021-01-01T00:00:00Z' + updated_at: '2021-01-01T00:00:00Z' + - adapter: dns + type: Available + status: 'True' + reason: DNS records created + message: custom.domain.com created + observed_generation: 1 + created_at: '2021-01-01T00:00:00Z' + updated_at: '2021-01-01T00:00:00Z' + created_by: user-123@example.com + updated_by: user-123@example.com + ClusterBase: + type: object + required: + - kind + - name + - spec + properties: + kind: + type: string + default: Cluster + name: + type: string + minLength: 1 + maxLength: 63 + pattern: ^[a-z0-9-]+$ + description: Cluster name (unique) + spec: + allOf: + - $ref: '#/components/schemas/ClusterSpecCore' + description: |- + Cluster specification + CLM doesn't know how to unmarshall the spec, it only stores and forwards to adapters to do their job + But CLM will validate the schema before accepting the request + allOf: + - $ref: '#/components/schemas/APIResource' + ClusterCreateRequest: + type: object + allOf: + - $ref: '#/components/schemas/ClusterBase' + example: + kind: Cluster + name: cluster-123 + labels: + environment: production + team: platform + spec: {} + ClusterList: + type: object + required: + - items + properties: + items: + type: array + items: + $ref: '#/components/schemas/Cluster' + allOf: + - $ref: '#/components/schemas/List' + ClusterSpecCore: + type: object + description: |- + Core cluster specification. + Accepts any properties as the spec is provider-agnostic. + This is represented as a simple object to allow flexibility. + ClusterStatus: + type: object + required: + - phase + - last_transition_time + - observed_generation + - updated_at + - adapters + properties: + phase: + type: string + enum: + - NotReady + - Ready + - Failed + description: |- + Current cluster phase (native database column). + Updated when adapters report status. + Note: status.phase provides aggregated view from all adapter conditions. + last_transition_time: + type: string + format: date-time + description: |- + When cluster last transitioned (updated by adapters, used by Sentinel for backoff) + Updated when adapters report status if the phase changes + observed_generation: + type: integer + format: int32 + description: |- + Last generation processed by adapters + Updated when adapters report status. + This will be the lowest value of each adapter's observed_generation values + The phase value is based on this generation + updated_at: + type: string + format: date-time + description: |- + Time of the last complete report from adapters + Updated when adapters report status. + Oldest `updated_at` from the adapter conditions + adapters: + type: array + items: + $ref: '#/components/schemas/ConditionAvailable' + description: |- + Cluster status aggregation from all adapters. + + This object is computed by the service and CANNOT be modified directly. + It is aggregated from adapter status updates posted to `/clusters/{id}/statuses`. + + Provides quick overview of which adapters have reported and aggregated phase. + Condition: + type: object + required: + - adapter + - type + - status + - observed_generation + - created_at + - updated_at + properties: + adapter: + type: string + description: Adapter name + type: + type: string + status: + type: string + enum: + - 'True' + - 'False' + - Unknown + description: Condition status + reason: + type: string + description: Machine-readable reason code + message: + type: string + description: Human-readable message + observed_generation: + type: integer + format: int32 + created_at: + type: string + format: date-time + description: |- + Time when the condition was created + - In an adapter reporting conditions `updated_at==created_at` + - In the API, `created_at` doesn't change with new updates from adapters + updated_at: + type: string + format: date-time + description: Time when the condition was updated + last_transition_time: + type: string + format: date-time + description: When this condition last transitioned (computed by service when status changes) + ConditionAvailable: + type: object + required: + - type + properties: + type: + type: string + enum: + - Available + allOf: + - $ref: '#/components/schemas/ConditionBase' + description: |- + StatusAvailable is the status condition from an adapter that is used by CLM to compute the phase. + `phase=ready` when all adaptors report `Available=true` + ConditionBase: + type: object + required: + - adapter + - type + - status + - observed_generation + - created_at + - updated_at + properties: + adapter: + type: string + description: Adapter name + type: + type: string + status: + type: string + enum: + - 'True' + - 'False' + - Unknown + description: Condition status + reason: + type: string + description: Machine-readable reason code + message: + type: string + description: Human-readable message + observed_generation: + type: integer + format: int32 + created_at: + type: string + format: date-time + description: |- + Time when the condition was created + - In an adapter reporting conditions `updated_at==created_at` + - In the API, `created_at` doesn't change with new updates from adapters + updated_at: + type: string + format: date-time + description: Time when the condition was updated + description: |- + Common data for status objects, part of: + - Status entity (stored in DB) + - Request payload + Error: + type: object + properties: + id: + type: string + kind: + type: string + description: Resource kind + href: + type: string + description: Resource URI + code: + type: string + reason: + type: string + operation_id: + type: string + List: + type: object + required: + - kind + - page + - size + - total + - items + properties: + kind: + type: string + page: + type: integer + format: int32 + size: + type: integer + format: int32 + total: + type: integer + format: int32 + items: + type: array + items: {} + NodePool: + type: object + required: + - created_at + - updated_at + - created_by + - updated_by + - owner_references + - status + properties: + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + created_by: + type: string + format: email + updated_by: + type: string + format: email + owner_references: + $ref: '#/components/schemas/ObjectReference' + status: + $ref: '#/components/schemas/NodePoolStatus' + allOf: + - $ref: '#/components/schemas/NodePoolBase' + NodePoolBase: + type: object + required: + - name + - spec + properties: + labels: + type: object + additionalProperties: + type: string + description: labels for the API resource as pairs of name:value strings + id: + type: string + description: Resource identifier + kind: + type: string + description: Resource kind + href: + type: string + description: Resource URI + name: + type: string + description: NodePool name (unique in a cluster) + spec: + allOf: + - $ref: '#/components/schemas/NodePoolSpecCore' + description: |- + Cluster specification + CLM doesn't know how to unmarshall the spec, it only stores and forwards to adapters to do their job + But CLM will validate the schema before accepting the request + NodePoolCreateRequest: + type: object + required: + - name + - spec + properties: + labels: + type: object + additionalProperties: + type: string + description: labels for the API resource as pairs of name:value strings + id: + type: string + description: Resource identifier + kind: + type: string + description: Resource kind + href: + type: string + description: Resource URI + name: + type: string + description: NodePool name (unique in a cluster) + spec: + allOf: + - $ref: '#/components/schemas/NodePoolSpecCore' + description: |- + Cluster specification + CLM doesn't know how to unmarshall the spec, it only stores and forwards to adapters to do their job + But CLM will validate the schema before accepting the request + NodePoolCreateResponse: + type: object + required: + - created_at + - updated_at + - created_by + - updated_by + - owner_references + - status + - name + - spec + properties: + created_at: + type: string + format: date-time + updated_at: + type: string + format: date-time + created_by: + type: string + format: email + updated_by: + type: string + format: email + owner_references: + $ref: '#/components/schemas/ObjectReference' + status: + $ref: '#/components/schemas/NodePoolStatus' + labels: + type: object + additionalProperties: + type: string + description: labels for the API resource as pairs of name:value strings + id: + type: string + description: Resource identifier + kind: + type: string + description: Resource kind + href: + type: string + description: Resource URI + name: + type: string + description: NodePool name (unique in a cluster) + spec: + allOf: + - $ref: '#/components/schemas/NodePoolSpecCore' + description: |- + Cluster specification + CLM doesn't know how to unmarshall the spec, it only stores and forwards to adapters to do their job + But CLM will validate the schema before accepting the request + NodePoolList: + type: object + required: + - items + properties: + items: + type: array + items: + $ref: '#/components/schemas/NodePool' + allOf: + - $ref: '#/components/schemas/List' + NodePoolSpecCore: + type: object + description: |- + Core nodepool specification. + Accepts any properties as the spec is provider-agnostic. + This is represented as a simple object to allow flexibility. + NodePoolStatus: + type: object + required: + - phase + - observed_generation + - last_transition_time + - updated_at + - adapters + properties: + phase: + type: string + enum: + - NotReady + - Ready + - Failed + description: |- + Current NodePool phase (native database column). + Updated when adapters report status. + Note: status.phase provides aggregated view from all adapter conditions. + observed_generation: + type: integer + format: int32 + minimum: 1 + description: |- + Last generation processed by adapters + Updated when adapters report status. + This will be the lowest value of each adapter's observed_generation values + The phase value is based on this generation + last_transition_time: + type: string + format: date-time + description: When NodePool last transitioned (updated by adapters, used by Sentinel for backoff) + updated_at: + type: string + format: date-time + description: |- + Time of the last complete report from adapters + Updated when adapters report status. + Oldest `updated_at` from the adapter conditions + adapters: + type: array + items: + $ref: '#/components/schemas/ConditionAvailable' + description: |- + NodePool status aggregation from all adapters. + + This object is computed by the service and CANNOT be modified directly. + ObjectReference: + type: object + properties: + id: + type: string + description: Resource identifier + kind: + type: string + description: Resource kind + href: + type: string + description: Resource URI + OrderDirection: + type: string + enum: + - asc + - desc + securitySchemes: + BearerAuth: + type: http + scheme: Bearer +servers: + - url: http://localhost:8000 + description: Development + variables: {} diff --git a/openapitools.json b/openapitools.json new file mode 100755 index 0000000..54cbf35 --- /dev/null +++ b/openapitools.json @@ -0,0 +1,8 @@ +{ + "$schema": "node_modules/@openapitools/openapi-generator-cli/config.schema.json", + "spaces": 2, + "generator-cli": { + "version": "5.4.0" + } +} + diff --git a/pkg/api/adapter_status_types.go b/pkg/api/adapter_status_types.go new file mode 100644 index 0000000..767a011 --- /dev/null +++ b/pkg/api/adapter_status_types.go @@ -0,0 +1,104 @@ +package api + +import ( + "encoding/json" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "gorm.io/datatypes" + "gorm.io/gorm" +) + +// AdapterStatus database model +type AdapterStatus struct { + Meta // Contains ID, CreatedAt, UpdatedAt, DeletedAt + + // Polymorphic association + ResourceType string `json:"resource_type" gorm:"size:20;index:idx_resource;not null"` + ResourceID string `json:"resource_id" gorm:"size:255;index:idx_resource;not null"` + + // Adapter information + Adapter string `json:"adapter" gorm:"size:255;not null;uniqueIndex:idx_resource_adapter"` + ObservedGeneration int32 `json:"observed_generation" gorm:"not null"` + + // Stored as JSON + Conditions datatypes.JSON `json:"conditions" gorm:"type:jsonb;not null"` + Data datatypes.JSON `json:"data,omitempty" gorm:"type:jsonb"` + Metadata datatypes.JSON `json:"metadata,omitempty" gorm:"type:jsonb"` +} + +type AdapterStatusList []*AdapterStatus +type AdapterStatusIndex map[string]*AdapterStatus + +func (l AdapterStatusList) Index() AdapterStatusIndex { + index := AdapterStatusIndex{} + for _, o := range l { + index[o.ID] = o + } + return index +} + +func (as *AdapterStatus) BeforeCreate(tx *gorm.DB) error { + as.ID = NewID() + return nil +} + +// ToOpenAPI converts to OpenAPI model +func (as *AdapterStatus) ToOpenAPI() *openapi.AdapterStatus { + // Unmarshal Conditions + var conditions []openapi.Condition + if len(as.Conditions) > 0 { + _ = json.Unmarshal(as.Conditions, &conditions) + } + + // Unmarshal Data + var data map[string]interface{} + if len(as.Data) > 0 { + _ = json.Unmarshal(as.Data, &data) + } + + // Unmarshal Metadata + var metadata *openapi.AdapterStatusMetadata + if len(as.Metadata) > 0 { + _ = json.Unmarshal(as.Metadata, &metadata) + } + + return &openapi.AdapterStatus{ + Adapter: as.Adapter, + ObservedGeneration: as.ObservedGeneration, + Conditions: conditions, + Data: data, + Metadata: metadata, + } +} + +// AdapterStatusFromOpenAPICreate creates GORM model from CreateRequest +func AdapterStatusFromOpenAPICreate( + resourceType, resourceID string, + req *openapi.AdapterStatusCreateRequest, +) *AdapterStatus { + // Marshal Conditions + conditionsJSON, _ := json.Marshal(req.Conditions) + + // Marshal Data + data := make(map[string]interface{}) + if req.Data != nil { + data = req.Data + } + dataJSON, _ := json.Marshal(data) + + // Marshal Metadata (if provided) + var metadataJSON datatypes.JSON + if req.Metadata != nil { + metadataJSON, _ = json.Marshal(req.Metadata) + } + + return &AdapterStatus{ + ResourceType: resourceType, + ResourceID: resourceID, + Adapter: req.Adapter, + ObservedGeneration: req.ObservedGeneration, + Conditions: conditionsJSON, + Data: dataJSON, + Metadata: metadataJSON, + } +} diff --git a/pkg/api/cluster_types.go b/pkg/api/cluster_types.go new file mode 100644 index 0000000..d817813 --- /dev/null +++ b/pkg/api/cluster_types.go @@ -0,0 +1,159 @@ +package api + +import ( + "encoding/json" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "gorm.io/datatypes" + "gorm.io/gorm" +) + +// Cluster database model +type Cluster struct { + Meta // Contains ID, CreatedAt, UpdatedAt, DeletedAt + + // Core fields + Kind string `json:"kind" gorm:"default:'Cluster'"` + Name string `json:"name" gorm:"uniqueIndex;size:63;not null"` + Spec datatypes.JSON `json:"spec" gorm:"type:jsonb;not null"` + Labels datatypes.JSON `json:"labels,omitempty" gorm:"type:jsonb"` + Href string `json:"href,omitempty" gorm:"size:500"` + + // Version control + Generation int32 `json:"generation" gorm:"default:1;not null"` + + // Status fields (expanded to database columns) + StatusPhase string `json:"status_phase" gorm:"default:'NotReady'"` + StatusLastTransitionTime *time.Time `json:"status_last_transition_time,omitempty"` + StatusObservedGeneration int32 `json:"status_observed_generation" gorm:"default:0"` + StatusUpdatedAt *time.Time `json:"status_updated_at,omitempty"` + StatusAdapters datatypes.JSON `json:"status_adapters" gorm:"type:jsonb"` + + // Audit fields + CreatedBy string `json:"created_by" gorm:"size:255;not null"` + UpdatedBy string `json:"updated_by" gorm:"size:255;not null"` +} + +type ClusterList []*Cluster +type ClusterIndex map[string]*Cluster + +func (l ClusterList) Index() ClusterIndex { + index := ClusterIndex{} + for _, o := range l { + index[o.ID] = o + } + return index +} + +func (c *Cluster) BeforeCreate(tx *gorm.DB) error { + c.ID = NewID() + if c.Kind == "" { + c.Kind = "Cluster" + } + if c.Generation == 0 { + c.Generation = 1 + } + if c.StatusPhase == "" { + c.StatusPhase = "NotReady" + } + // Set Href if not already set + if c.Href == "" { + c.Href = "/api/hyperfleet/v1/clusters/" + c.ID + } + return nil +} + +// ToOpenAPI converts to OpenAPI model +func (c *Cluster) ToOpenAPI() *openapi.Cluster { + // Unmarshal Spec + var spec map[string]interface{} + if len(c.Spec) > 0 { + _ = json.Unmarshal(c.Spec, &spec) + } + + // Unmarshal Labels + var labels map[string]string + if len(c.Labels) > 0 { + _ = json.Unmarshal(c.Labels, &labels) + } + + // Unmarshal StatusAdapters + var statusAdapters []openapi.ConditionAvailable + if len(c.StatusAdapters) > 0 { + _ = json.Unmarshal(c.StatusAdapters, &statusAdapters) + } + + // Generate Href if not set (fallback) + href := c.Href + if href == "" { + href = "/api/hyperfleet/v1/clusters/" + c.ID + } + + cluster := &openapi.Cluster{ + Id: &c.ID, + Kind: c.Kind, + Href: &href, + Name: c.Name, + Spec: spec, + Labels: &labels, + Generation: c.Generation, + CreatedAt: c.CreatedAt, + UpdatedAt: c.UpdatedAt, + CreatedBy: c.CreatedBy, + UpdatedBy: c.UpdatedBy, + } + + // Build ClusterStatus + cluster.Status = openapi.ClusterStatus{ + Phase: c.StatusPhase, + ObservedGeneration: c.StatusObservedGeneration, + Adapters: statusAdapters, + } + + if c.StatusLastTransitionTime != nil { + cluster.Status.LastTransitionTime = *c.StatusLastTransitionTime + } + + if c.StatusUpdatedAt != nil { + cluster.Status.UpdatedAt = *c.StatusUpdatedAt + } + + return cluster +} + +// ClusterFromOpenAPICreate creates GORM model from OpenAPI CreateRequest +func ClusterFromOpenAPICreate(req *openapi.ClusterCreateRequest, createdBy string) *Cluster { + // Marshal Spec + specJSON, _ := json.Marshal(req.Spec) + + // Marshal Labels + labels := make(map[string]string) + if req.Labels != nil { + labels = *req.Labels + } + labelsJSON, _ := json.Marshal(labels) + + // Marshal empty StatusAdapters + statusAdaptersJSON, _ := json.Marshal([]openapi.ConditionAvailable{}) + + return &Cluster{ + Kind: req.Kind, + Name: req.Name, + Spec: specJSON, + Labels: labelsJSON, + Generation: 1, + StatusPhase: "NotReady", + StatusObservedGeneration: 0, + StatusAdapters: statusAdaptersJSON, + CreatedBy: createdBy, + UpdatedBy: createdBy, + } +} + +type ClusterPatchRequest struct { + Name *string `json:"name,omitempty"` + Spec *map[string]interface{} `json:"spec,omitempty"` + Generation *int32 `json:"generation,omitempty"` + Labels *map[string]string `json:"labels,omitempty"` +} diff --git a/pkg/api/error.go b/pkg/api/error.go new file mode 100755 index 0000000..f5d990a --- /dev/null +++ b/pkg/api/error.go @@ -0,0 +1,111 @@ +package api + +import ( + "encoding/json" + "fmt" + "net/http" + "os" + + "github.com/golang/glog" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +// SendNotFound sends a 404 response with some details about the non existing resource. +func SendNotFound(w http.ResponseWriter, r *http.Request) { + // Set the content type: + w.Header().Set("Content-Type", "application/json") + + // Prepare the body: + id := "404" + reason := fmt.Sprintf( + "The requested resource '%s' doesn't exist", + r.URL.Path, + ) + body := Error{ + Type: ErrorType, + ID: id, + HREF: "/api/hyperfleet/v1/errors/" + id, + Code: "hyperfleet-" + id, + Reason: reason, + } + data, err := json.Marshal(body) + if err != nil { + SendPanic(w, r) + return + } + + // Send the response: + w.WriteHeader(http.StatusNotFound) + _, err = w.Write(data) + if err != nil { + err = fmt.Errorf("can't send response body for request '%s'", r.URL.Path) + glog.Error(err) + return + } +} + +func SendUnauthorized(w http.ResponseWriter, r *http.Request, message string) { + w.Header().Set("Content-Type", "application/json") + + // Prepare the body: + apiError := errors.Unauthorized("%s", message) + data, err := json.Marshal(apiError) + if err != nil { + SendPanic(w, r) + return + } + + // Send the response: + w.WriteHeader(http.StatusUnauthorized) + _, err = w.Write(data) + if err != nil { + err = fmt.Errorf("can't send response body for request '%s'", r.URL.Path) + glog.Error(err) + return + } +} + +// SendPanic sends a panic error response to the client, but it doesn't end the process. +func SendPanic(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + _, err := w.Write(panicBody) + if err != nil { + err = fmt.Errorf( + "can't send panic response for request '%s': %s", + r.URL.Path, + err.Error(), + ) + glog.Error(err) + } +} + +// panicBody is the error body that will be sent when something unexpected happens while trying to +// send another error response. For example, if sending an error response fails because the error +// description can't be converted to JSON. +var panicBody []byte + +func init() { + var err error + + // Create the panic error body: + panicID := "1000" + panicError := Error{ + Type: ErrorType, + ID: panicID, + HREF: "/api/hyperfleet/v1/" + panicID, + Code: "hyperfleet-" + panicID, + Reason: "An unexpected error happened, please check the log of the service " + + "for details", + } + + // Convert it to JSON: + panicBody, err = json.Marshal(panicError) + if err != nil { + err = fmt.Errorf( + "can't create the panic error body: %s", + err.Error(), + ) + glog.Error(err) + os.Exit(1) + } +} diff --git a/pkg/api/error_types.go b/pkg/api/error_types.go new file mode 100755 index 0000000..57a0bac --- /dev/null +++ b/pkg/api/error_types.go @@ -0,0 +1,13 @@ +package api + +// ErrorType is the name of the type used to report errors. +const ErrorType = "Error" + +// Error represents an error reported by the API. +type Error struct { + Type string `json:"type,omitempty"` + ID string `json:"id,omitempty"` + HREF string `json:"href,omitempty"` + Code string `json:"code,omitempty"` + Reason string `json:"reason,omitempty"` +} diff --git a/pkg/api/metadata_types.go b/pkg/api/metadata_types.go new file mode 100755 index 0000000..24b89d4 --- /dev/null +++ b/pkg/api/metadata_types.go @@ -0,0 +1,49 @@ +/* +Copyright (c) 2018 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains the API metadata types used by the hyperfleet. + +package api + +import ( + "time" + + "gorm.io/gorm" +) + +// Metadata api metadata. +type Metadata struct { + ID string `json:"id"` + HREF string `json:"href"` + Kind string `json:"kind"` + Version string `json:"version"` + BuildTime string `json:"build_time"` +} + +// Meta is base model definition, embedded in all kinds +type Meta struct { + ID string + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt gorm.DeletedAt `gorm:"index"` +} + +// PagingMeta List Paging metadata +type PagingMeta struct { + Page int + Size int64 + Total int64 +} diff --git a/pkg/api/node_pool_types.go b/pkg/api/node_pool_types.go new file mode 100644 index 0000000..cabfa59 --- /dev/null +++ b/pkg/api/node_pool_types.go @@ -0,0 +1,185 @@ +package api + +import ( + "encoding/json" + "fmt" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "gorm.io/datatypes" + "gorm.io/gorm" +) + +// NodePool database model +type NodePool struct { + Meta // Contains ID, CreatedAt, UpdatedAt, DeletedAt + + // Core fields + Kind string `json:"kind" gorm:"default:'NodePool'"` + Name string `json:"name" gorm:"size:255;not null"` + Spec datatypes.JSON `json:"spec" gorm:"type:jsonb;not null"` + Labels datatypes.JSON `json:"labels,omitempty" gorm:"type:jsonb"` + Href string `json:"href,omitempty" gorm:"size:500"` + + // Owner references (expanded) + OwnerID string `json:"owner_id" gorm:"size:255;not null;index"` + OwnerKind string `json:"owner_kind" gorm:"size:50;not null"` + OwnerHref string `json:"owner_href,omitempty" gorm:"size:500"` + + // Foreign key relationship + Cluster *Cluster `gorm:"foreignKey:OwnerID;references:ID"` + + // Status fields (expanded) + StatusPhase string `json:"status_phase" gorm:"default:'NotReady'"` + StatusObservedGeneration int32 `json:"status_observed_generation" gorm:"default:0"` + StatusLastTransitionTime *time.Time `json:"status_last_transition_time,omitempty"` + StatusUpdatedAt *time.Time `json:"status_updated_at,omitempty"` + StatusAdapters datatypes.JSON `json:"status_adapters" gorm:"type:jsonb"` + + // Audit fields + CreatedBy string `json:"created_by" gorm:"size:255;not null"` + UpdatedBy string `json:"updated_by" gorm:"size:255;not null"` +} + +type NodePoolList []*NodePool +type NodePoolIndex map[string]*NodePool + +func (l NodePoolList) Index() NodePoolIndex { + index := NodePoolIndex{} + for _, o := range l { + index[o.ID] = o + } + return index +} + +func (np *NodePool) BeforeCreate(tx *gorm.DB) error { + np.ID = NewID() + if np.Kind == "" { + np.Kind = "NodePool" + } + if np.OwnerKind == "" { + np.OwnerKind = "Cluster" + } + if np.StatusPhase == "" { + np.StatusPhase = "NotReady" + } + // Set Href if not already set + if np.Href == "" { + np.Href = fmt.Sprintf("/api/hyperfleet/v1/clusters/%s/nodepools/%s", np.OwnerID, np.ID) + } + // Set OwnerHref if not already set + if np.OwnerHref == "" { + np.OwnerHref = "/api/hyperfleet/v1/clusters/" + np.OwnerID + } + return nil +} + +// ToOpenAPI converts to OpenAPI model +func (np *NodePool) ToOpenAPI() *openapi.NodePool { + // Unmarshal Spec + var spec map[string]interface{} + if len(np.Spec) > 0 { + _ = json.Unmarshal(np.Spec, &spec) + } + + // Unmarshal Labels + var labels map[string]string + if len(np.Labels) > 0 { + _ = json.Unmarshal(np.Labels, &labels) + } + + // Unmarshal StatusAdapters + var statusAdapters []openapi.ConditionAvailable + if len(np.StatusAdapters) > 0 { + _ = json.Unmarshal(np.StatusAdapters, &statusAdapters) + } + + // Generate Href if not set (fallback) + href := np.Href + if href == "" { + href = fmt.Sprintf("/api/hyperfleet/v1/clusters/%s/nodepools/%s", np.OwnerID, np.ID) + } + + // Generate OwnerHref if not set (fallback) + ownerHref := np.OwnerHref + if ownerHref == "" { + ownerHref = "/api/hyperfleet/v1/clusters/" + np.OwnerID + } + + kind := np.Kind + nodePool := &openapi.NodePool{ + Id: &np.ID, + Kind: &kind, + Href: &href, + Name: np.Name, + Spec: spec, + Labels: &labels, + OwnerReferences: openapi.ObjectReference{ + Id: &np.OwnerID, + Kind: &np.OwnerKind, + Href: &ownerHref, + }, + CreatedAt: np.CreatedAt, + UpdatedAt: np.UpdatedAt, + CreatedBy: np.CreatedBy, + UpdatedBy: np.UpdatedBy, + } + + // Build NodePoolStatus + nodePool.Status = openapi.NodePoolStatus{ + Phase: np.StatusPhase, + ObservedGeneration: np.StatusObservedGeneration, + Adapters: statusAdapters, + } + + if np.StatusLastTransitionTime != nil { + nodePool.Status.LastTransitionTime = *np.StatusLastTransitionTime + } + + if np.StatusUpdatedAt != nil { + nodePool.Status.UpdatedAt = *np.StatusUpdatedAt + } + + return nodePool +} + +// NodePoolFromOpenAPICreate creates GORM model from OpenAPI CreateRequest +func NodePoolFromOpenAPICreate(req *openapi.NodePoolCreateRequest, ownerID, createdBy string) *NodePool { + // Marshal Spec + specJSON, _ := json.Marshal(req.Spec) + + // Marshal Labels + labels := make(map[string]string) + if req.Labels != nil { + labels = *req.Labels + } + labelsJSON, _ := json.Marshal(labels) + + // Marshal empty StatusAdapters + statusAdaptersJSON, _ := json.Marshal([]openapi.ConditionAvailable{}) + + kind := "NodePool" + if req.Kind != nil { + kind = *req.Kind + } + + return &NodePool{ + Kind: kind, + Name: req.Name, + Spec: specJSON, + Labels: labelsJSON, + OwnerID: ownerID, + OwnerKind: "Cluster", + StatusPhase: "NotReady", + StatusObservedGeneration: 0, + StatusAdapters: statusAdaptersJSON, + CreatedBy: createdBy, + UpdatedBy: createdBy, + } +} + +type NodePoolPatchRequest struct { + Name *string `json:"name,omitempty"` + Spec *map[string]interface{} `json:"spec,omitempty"` + Labels *map[string]string `json:"labels,omitempty"` +} diff --git a/pkg/api/openapi_embed.go b/pkg/api/openapi_embed.go new file mode 100755 index 0000000..e3f69f3 --- /dev/null +++ b/pkg/api/openapi_embed.go @@ -0,0 +1,15 @@ +package api + +import ( + "embed" + "io/fs" +) + +//go:embed openapi/api/openapi.yaml +var openapiFS embed.FS + +// GetOpenAPISpec returns the embedded OpenAPI YAML file contents +func GetOpenAPISpec() ([]byte, error) { + return fs.ReadFile(openapiFS, "openapi/api/openapi.yaml") +} + diff --git a/pkg/api/presenters/cluster.go b/pkg/api/presenters/cluster.go new file mode 100644 index 0000000..e1b2bc6 --- /dev/null +++ b/pkg/api/presenters/cluster.go @@ -0,0 +1,35 @@ +package presenters + +import ( + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" +) + +// ConvertCluster converts openapi.Cluster to api.Cluster (GORM model) +func ConvertCluster(cluster openapi.Cluster) *api.Cluster { + // Use ClusterFromOpenAPICreate helper + createdBy := cluster.CreatedBy + if createdBy == "" { + createdBy = "system" + } + + req := &openapi.ClusterCreateRequest{ + Kind: cluster.Kind, + Name: cluster.Name, + Spec: cluster.Spec, + Labels: cluster.Labels, + } + + return api.ClusterFromOpenAPICreate(req, createdBy) +} + +// PresentCluster converts api.Cluster (GORM model) to openapi.Cluster +func PresentCluster(cluster *api.Cluster) openapi.Cluster { + // Use the ToOpenAPI method we implemented + result := cluster.ToOpenAPI() + if result == nil { + // Return empty cluster if conversion fails + return openapi.Cluster{} + } + return *result +} diff --git a/pkg/api/presenters/error.go b/pkg/api/presenters/error.go new file mode 100755 index 0000000..4c3ee23 --- /dev/null +++ b/pkg/api/presenters/error.go @@ -0,0 +1,10 @@ +package presenters + +import ( + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +func PresentError(err *errors.ServiceError) openapi.Error { + return err.AsOpenapiError("") +} diff --git a/pkg/api/presenters/kind.go b/pkg/api/presenters/kind.go new file mode 100755 index 0000000..9803f3a --- /dev/null +++ b/pkg/api/presenters/kind.go @@ -0,0 +1,44 @@ +package presenters + +import ( + "fmt" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +type KindMappingFunc func(interface{}) string + +var kindRegistry = make(map[string]KindMappingFunc) + +func RegisterKind(objType interface{}, kindValue string) { + typeName := fmt.Sprintf("%T", objType) + kindRegistry[typeName] = func(interface{}) string { + return kindValue + } +} + +func LoadDiscoveredKinds(i interface{}) string { + typeName := fmt.Sprintf("%T", i) + if mappingFunc, found := kindRegistry[typeName]; found { + return mappingFunc(i) + } + return "" +} + +func ObjectKind(i interface{}) *string { + result := "" + + // Check auto-discovered kinds first + if discoveredKind := LoadDiscoveredKinds(i); discoveredKind != "" { + result = discoveredKind + } else { + // Built-in mappings + switch i.(type) { + case errors.ServiceError, *errors.ServiceError: + result = "Error" + } + } + + return openapi.PtrString(result) +} diff --git a/pkg/api/presenters/node_pool.go b/pkg/api/presenters/node_pool.go new file mode 100644 index 0000000..b33df17 --- /dev/null +++ b/pkg/api/presenters/node_pool.go @@ -0,0 +1,40 @@ +package presenters + +import ( + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" +) + +// ConvertNodePool converts openapi.NodePool to api.NodePool (GORM model) +func ConvertNodePool(nodePool openapi.NodePool) *api.NodePool { + // Use NodePoolFromOpenAPICreate helper + createdBy := nodePool.CreatedBy + if createdBy == "" { + createdBy = "system" + } + + ownerID := "" + if nodePool.OwnerReferences.Id != nil { + ownerID = *nodePool.OwnerReferences.Id + } + + req := &openapi.NodePoolCreateRequest{ + Kind: nodePool.Kind, + Name: nodePool.Name, + Spec: nodePool.Spec, + Labels: nodePool.Labels, + } + + return api.NodePoolFromOpenAPICreate(req, ownerID, createdBy) +} + +// PresentNodePool converts api.NodePool (GORM model) to openapi.NodePool +func PresentNodePool(nodePool *api.NodePool) openapi.NodePool { + // Use the ToOpenAPI method we implemented + result := nodePool.ToOpenAPI() + if result == nil { + // Return empty nodePool if conversion fails + return openapi.NodePool{} + } + return *result +} diff --git a/pkg/api/presenters/object_reference.go b/pkg/api/presenters/object_reference.go new file mode 100755 index 0000000..5b8ec8d --- /dev/null +++ b/pkg/api/presenters/object_reference.go @@ -0,0 +1,35 @@ +package presenters + +import ( + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" +) + +func PresentReference(id, obj interface{}) openapi.ObjectReference { + refId, ok := makeReferenceId(id) + + if !ok { + return openapi.ObjectReference{} + } + + return openapi.ObjectReference{ + Id: openapi.PtrString(refId), + Kind: ObjectKind(obj), + Href: ObjectPath(refId, obj), + } +} + +func makeReferenceId(id interface{}) (string, bool) { + var refId string + + if i, ok := id.(string); ok { + refId = i + } + + if i, ok := id.(*string); ok { + if i != nil { + refId = *i + } + } + + return refId, refId != "" +} diff --git a/pkg/api/presenters/path.go b/pkg/api/presenters/path.go new file mode 100755 index 0000000..d98dcae --- /dev/null +++ b/pkg/api/presenters/path.go @@ -0,0 +1,50 @@ +package presenters + +import ( + "fmt" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +type PathMappingFunc func(interface{}) string + +var pathRegistry = make(map[string]PathMappingFunc) + +func RegisterPath(objType interface{}, pathValue string) { + typeName := fmt.Sprintf("%T", objType) + pathRegistry[typeName] = func(interface{}) string { + return pathValue + } +} + +func LoadDiscoveredPaths(i interface{}) string { + typeName := fmt.Sprintf("%T", i) + if mappingFunc, found := pathRegistry[typeName]; found { + return mappingFunc(i) + } + return "" +} + +const ( + BasePath = "/api/hyperfleet/v1" +) + +func ObjectPath(id string, obj interface{}) *string { + return openapi.PtrString(fmt.Sprintf("%s/%s/%s", BasePath, path(obj), id)) +} + +func path(i interface{}) string { + // Check auto-discovered paths first + if discoveredPath := LoadDiscoveredPaths(i); discoveredPath != "" { + return discoveredPath + } + + // Built-in mappings + switch i.(type) { + case errors.ServiceError, *errors.ServiceError: + return "errors" + default: + return "" + } +} diff --git a/pkg/api/presenters/slice_filter.go b/pkg/api/presenters/slice_filter.go new file mode 100755 index 0000000..3f79bbd --- /dev/null +++ b/pkg/api/presenters/slice_filter.go @@ -0,0 +1,232 @@ +package presenters + +import ( + "fmt" + "reflect" + "regexp" + "strings" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +type ProjectionList struct { + Kind string `json:"kind"` + Page int32 `json:"page"` + Size int32 `json:"size"` + Total int32 `json:"total"` + Items []map[string]interface{} `json:"items"` +} + +/* + SliceFilter + +Convert slice of structures to a []byte stream. +Non-existing fields will cause a validation error + +@param fields2Store []string - list of fields to export (from `json` tag) + +@param items []interface{} - slice of structures to export + +@param kind, page, size, total - from openapi.SubscriptionList et al. + +@return []byte +*/ +func SliceFilter(fields2Store []string, model interface{}) (*ProjectionList, *errors.ServiceError) { + if model == nil { + return nil, errors.Validation("Empty model") + } + + // Prepare list of required field + var in = map[string]bool{} + for i := 0; i < len(fields2Store); i++ { + in[fields2Store[i]] = true + } + + reflectValue := reflect.ValueOf(model) + reflectValue = reflect.Indirect(reflectValue) + + // Initialize result structure + result := &ProjectionList{ + Kind: reflectValue.FieldByName("Kind").String(), + Page: int32(reflectValue.FieldByName("Page").Int()), + Size: int32(reflectValue.FieldByName("Size").Int()), + Total: int32(reflectValue.FieldByName("Total").Int()), + Items: nil, + } + + field := reflectValue.FieldByName("Items").Interface() + items := reflect.ValueOf(field) + if items.Len() == 0 { + return result, nil + } + + // Validate model + validateIn := make(map[string]bool) + for key, value := range in { + validateIn[key] = value + } + if err := validate(items.Index(0).Interface(), validateIn, ""); err != nil { + return nil, err + } + + // Convert items + for i := 0; i < items.Len(); i++ { + result.Items = append(result.Items, structToMap(items.Index(i).Interface(), in, "")) + } + return result, nil +} + +func validate(model interface{}, in map[string]bool, prefix string) *errors.ServiceError { + if model == nil { + return errors.Validation("Empty model") + } + + v := reflect.TypeOf(model) + reflectValue := reflect.ValueOf(model) + reflectValue = reflect.Indirect(reflectValue) + + if v.Kind() == reflect.Pointer { + v = v.Elem() + } + + for i := 0; i < v.NumField(); i++ { + t := v.Field(i) + tag := t.Tag.Get("json") + if tag == "" || tag == "-" { + continue + } + ttype := reflectValue.Field(i) + kind := ttype.Kind() + if kind == reflect.Pointer { + kind = ttype.Elem().Kind() + } + field := reflectValue.Field(i).Interface() + name := strings.Split(tag, ",")[0] + if kind == reflect.Struct { + if t.Type == reflect.TypeOf(&time.Time{}) { + delete(in, name) + } else { + star := name + ".*" + if _, ok := in[star]; ok { + in = removeStar(in, name) + } else { + _ = validate(field, in, name) + } + } + } else if t.Type.Kind() == reflect.Slice { + // TODO: We don't support Slices' validation :( + in = removeStar(in, name) + continue + //_ = validate(slice, in, name) + } else { + prefixedName := name + if prefix != "" { + prefixedName = fmt.Sprintf("%s.%s", prefix, name) + } + delete(in, prefixedName) + } + } + + // All fields present in data struct + if len(in) == 0 { + return nil + } + + var fields []string + for k := range in { + fields = append(fields, k) + } + message := fmt.Sprintf("The following field(s) doesn't exist in `%s`: %s", + reflect.TypeOf(model).Name(), strings.Join(fields, ", ")) + return errors.Validation("%s", message) +} + +func removeStar(in map[string]bool, name string) map[string]bool { + pattern := `(` + name + `\..*)` + pat, _ := regexp.Compile(pattern) + for k := range in { + matched := pat.FindAllString(k, -1) + for _, m := range matched { + delete(in, m) + } + } + + return in +} + +func structToMap(item interface{}, in map[string]bool, prefix string) map[string]interface{} { + res := map[string]interface{}{} + + if item == nil { + return res + } + v := reflect.TypeOf(item) + reflectValue := reflect.ValueOf(item) + reflectValue = reflect.Indirect(reflectValue) + + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + for i := 0; i < v.NumField(); i++ { + t := v.Field(i) + tag := t.Tag.Get("json") + if tag == "" || tag == "-" { + continue + } + ttype := reflectValue.Field(i) + kind := ttype.Kind() + if kind == reflect.Pointer { + kind = ttype.Elem().Kind() + } + field := reflectValue.Field(i).Interface() + name := strings.Split(tag, ",")[0] + if kind == reflect.Struct { + if t.Type == reflect.TypeOf(&time.Time{}) { + if _, ok := in[name]; ok { + res[name] = field.(*time.Time).Format(time.RFC3339) + } + } else { + nexPrefix := name + if prefix != "" { + nexPrefix = prefix + "." + name + } + subStruct := structToMap(field, in, nexPrefix) + if len(subStruct) > 0 { + res[name] = subStruct + } + } + } else if kind == reflect.Slice { + s := reflect.ValueOf(field) + if s.Len() > 0 { + result := make([]interface{}, 0, s.Len()) + for i := 0; i < s.Len(); i++ { + slice := structToMap(s.Index(i).Interface(), in, name) + if len(slice) == 0 { + break + } + result = append(result, slice) + } + if len(result) > 0 { + res[name] = result + } + } + } else { + prefixedName := name + if prefix != "" { + prefixedName = fmt.Sprintf("%s.%s", prefix, name) + } + if _, ok := in[prefixedName]; ok { + res[name] = field + } else { + prefixedStar := fmt.Sprintf("%s.*", prefix) + if _, ok := in[prefixedStar]; ok { + res[name] = field + } + } + } + } + + return res +} diff --git a/pkg/api/presenters/time.go b/pkg/api/presenters/time.go new file mode 100755 index 0000000..99cdd2b --- /dev/null +++ b/pkg/api/presenters/time.go @@ -0,0 +1,14 @@ +package presenters + +import ( + "time" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/util" +) + +func PresentTime(t time.Time) *time.Time { + if t.IsZero() { + return util.ToPtr(time.Time{}) + } + return util.ToPtr(t.Round(time.Microsecond)) +} diff --git a/pkg/api/resource_id.go b/pkg/api/resource_id.go new file mode 100755 index 0000000..cdd71af --- /dev/null +++ b/pkg/api/resource_id.go @@ -0,0 +1,7 @@ +package api + +import "github.com/segmentio/ksuid" + +func NewID() string { + return ksuid.New().String() +} diff --git a/pkg/api/version.go b/pkg/api/version.go new file mode 100755 index 0000000..8eb0e1d --- /dev/null +++ b/pkg/api/version.go @@ -0,0 +1,25 @@ +/* +Copyright (c) 2018 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// This file contains the version information that is set at build time. + +package api + +// Version is the application version set at compile time via ldflags +var Version = "unknown" + +// BuildTime is the time when the binary was built, set at compile time via ldflags +var BuildTime = "unknown" diff --git a/pkg/auth/auth_middleware.go b/pkg/auth/auth_middleware.go new file mode 100755 index 0000000..0117f63 --- /dev/null +++ b/pkg/auth/auth_middleware.go @@ -0,0 +1,40 @@ +package auth + +import ( + "fmt" + "net/http" + + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +type JWTMiddleware interface { + AuthenticateAccountJWT(next http.Handler) http.Handler +} + +type Middleware struct{} + +var _ JWTMiddleware = &Middleware{} + +func NewAuthMiddleware() (*Middleware, error) { + middleware := Middleware{} + return &middleware, nil +} + +// AuthenticateAccountJWT Middleware handler to validate JWT tokens and authenticate users +func (a *Middleware) AuthenticateAccountJWT(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + payload, err := GetAuthPayload(r) + if err != nil { + handleError(ctx, w, errors.ErrorUnauthorized, fmt.Sprintf("Unable to get payload details from JWT token: %s", err)) + return + } + + // Append the username to the request context + ctx = SetUsernameContext(ctx, payload.Username) + *r = *r.WithContext(ctx) + + next.ServeHTTP(w, r) + }) +} diff --git a/pkg/auth/auth_middleware_mock.go b/pkg/auth/auth_middleware_mock.go new file mode 100755 index 0000000..5deef8e --- /dev/null +++ b/pkg/auth/auth_middleware_mock.go @@ -0,0 +1,16 @@ +package auth + +import ( + "net/http" +) + +type MiddlewareMock struct{} + +var _ JWTMiddleware = &MiddlewareMock{} + +func (a *MiddlewareMock) AuthenticateAccountJWT(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // TODO need to append a username to the request context + next.ServeHTTP(w, r) + }) +} diff --git a/pkg/auth/authz_middleware.go b/pkg/auth/authz_middleware.go new file mode 100755 index 0000000..508855a --- /dev/null +++ b/pkg/auth/authz_middleware.go @@ -0,0 +1,72 @@ +package auth + +/* + The goal of this simple authz middlewre is to provide a way for access review + parameters to be declared for each route in a microservice. This is not meant + to handle more complex access review calls in particular scopes, but rather + just authz calls at the application scope + + This is a big TODO, not ready for consumption +*/ + +import ( + "fmt" + "net/http" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/client/ocm" +) + +type AuthorizationMiddleware interface { + AuthorizeApi(next http.Handler) http.Handler +} + +type authzMiddleware struct { + action string + resourceType string + + ocmClient *ocm.Client +} + +var _ AuthorizationMiddleware = &authzMiddleware{} + +func NewAuthzMiddleware(ocmClient *ocm.Client, action, resourceType string) AuthorizationMiddleware { + return &authzMiddleware{ + ocmClient: ocmClient, + action: action, + resourceType: resourceType, + } +} + +func (a authzMiddleware) AuthorizeApi(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := r.Context() + + // Get username from context + username := GetUsernameFromContext(ctx) + if username == "" { + _ = fmt.Errorf("authenticated username not present in request context") + // TODO + //body := api.E500.Format(r, "Authentication details not present in context") + //api.SendError(w, r, &body) + return + } + + allowed, err := a.ocmClient.Authorization.AccessReview( + ctx, username, a.action, a.resourceType, "", "", "") + if err != nil { + _ = fmt.Errorf("unable to make authorization request: %s", err) + // TODO + //body := api.E500.Format(r, "Unable to make authorization request") + //api.SendError(w, r, &body) + return + } + + if allowed { + next.ServeHTTP(w, r) + } + + // TODO + //body := api.E403.Format(r, "") + //api.SendError(w, r, &body) + }) +} diff --git a/pkg/auth/authz_middleware_mock.go b/pkg/auth/authz_middleware_mock.go new file mode 100755 index 0000000..9a0a345 --- /dev/null +++ b/pkg/auth/authz_middleware_mock.go @@ -0,0 +1,22 @@ +package auth + +import ( + "net/http" + + "github.com/golang/glog" +) + +type authzMiddlewareMock struct{} + +var _ AuthorizationMiddleware = &authzMiddlewareMock{} + +func NewAuthzMiddlewareMock() AuthorizationMiddleware { + return &authzMiddlewareMock{} +} + +func (a authzMiddlewareMock) AuthorizeApi(next http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + glog.Infof("Mock authz allows / for %q/%q", r.Method, r.URL) + next.ServeHTTP(w, r) + }) +} diff --git a/pkg/auth/context.go b/pkg/auth/context.go new file mode 100755 index 0000000..2bd6294 --- /dev/null +++ b/pkg/auth/context.go @@ -0,0 +1,115 @@ +package auth + +import ( + "context" + "fmt" + "net/http" + "strings" + + "github.com/golang-jwt/jwt/v4" + "github.com/openshift-online/ocm-sdk-go/authentication" +) + +// Context key type defined to avoid collisions in other pkgs using context +// See https://golang.org/pkg/context/#WithValue +type contextKey string + +const ( + ContextUsernameKey contextKey = "username" + + // Does not use contextKey type because the jwt middleware improperly updates context with string key type + // See https://github.com/auth0/go-jwt-middleware/blob/master/jwtmiddleware.go#L232 + ContextAuthKey string = "user" +) + +// AuthPayload defines the structure of the JWT payload we expect from +// RHD JWT tokens +type Payload struct { + Username string `json:"username"` + FirstName string `json:"first_name"` + LastName string `json:"last_name"` + Email string `json:"email"` + Issuer string `json:"iss"` + ClientID string `json:"clientId"` +} + +func SetUsernameContext(ctx context.Context, username string) context.Context { + return context.WithValue(ctx, ContextUsernameKey, username) +} + +func GetUsernameFromContext(ctx context.Context) string { + username := ctx.Value(ContextUsernameKey) + if username == nil { + return "" + } + return username.(string) +} + +// GetAuthPayloadFromContext Get authorization payload api object from context +func GetAuthPayloadFromContext(ctx context.Context) (*Payload, error) { + // Get user token from request context and validate + userToken, err := authentication.TokenFromContext(ctx) + if err != nil { + return nil, fmt.Errorf("unable to retrieve JWT token from request context: %v", err) + } + + if userToken == nil { + return nil, fmt.Errorf("JWT token in context is nil, unauthorized") + } + + // Username is stored in token claim with key 'sub' + claims, ok := userToken.Claims.(jwt.MapClaims) + if !ok { + err := fmt.Errorf("unable to parse JWT token claims: %#v", userToken.Claims) + return nil, err + } + + // TODO figure out how to unmarshal jwt.mapclaims into the struct to avoid all the + // type assertions + // + //var accountAuth api.AuthPayload + //err := json.Unmarshal([]byte(claims), &accountAuth) + //if err != nil { + // err := fmt.Errorf("Unable to parse JWT token claims") + // return nil, err + //} + + payload := &Payload{} + // default to the values we expect from RHSSO + payload.Username, _ = claims["username"].(string) + payload.FirstName, _ = claims["first_name"].(string) + payload.LastName, _ = claims["last_name"].(string) + payload.Email, _ = claims["email"].(string) + payload.ClientID, _ = claims["clientId"].(string) + + // Check values, if empty, use alternative claims from RHD + if payload.Username == "" { + payload.Username, _ = claims["preferred_username"].(string) + } + + if payload.FirstName == "" { + payload.FirstName, _ = claims["given_name"].(string) + } + + if payload.LastName == "" { + payload.LastName, _ = claims["family_name"].(string) + } + + // If given and family names are not present, use the name field + if payload.FirstName == "" || payload.LastName == "" { + name, _ := claims["name"].(string) + names := strings.Split(name, " ") + if len(names) > 1 { + payload.FirstName = names[0] + payload.LastName = names[1] + } else { + payload.FirstName = names[0] + } + } + + return payload, nil +} + +func GetAuthPayload(r *http.Request) (*Payload, error) { + return GetAuthPayloadFromContext(r.Context()) +} diff --git a/pkg/auth/helpers.go b/pkg/auth/helpers.go new file mode 100755 index 0000000..8e5da26 --- /dev/null +++ b/pkg/auth/helpers.go @@ -0,0 +1,33 @@ +package auth + +import ( + "context" + "encoding/json" + "net/http" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" +) + +func handleError(ctx context.Context, w http.ResponseWriter, code errors.ServiceErrorCode, reason string) { + log := logger.NewOCMLogger(ctx) + operationID := logger.GetOperationID(ctx) + err := errors.New(code, "%s", reason) + if err.HttpCode >= 400 && err.HttpCode <= 499 { + log.Infof(err.Error()) + } else { + log.Error(err.Error()) + } + + writeJSONResponse(w, err.HttpCode, err.AsOpenapiError(operationID)) +} + +func writeJSONResponse(w http.ResponseWriter, code int, payload interface{}) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + + if payload != nil { + response, _ := json.Marshal(payload) + _, _ = w.Write(response) + } +} diff --git a/pkg/client/ocm/authorization.go b/pkg/client/ocm/authorization.go new file mode 100755 index 0000000..29d3bf3 --- /dev/null +++ b/pkg/client/ocm/authorization.go @@ -0,0 +1,77 @@ +package ocm + +import ( + "context" + "fmt" + + azv1 "github.com/openshift-online/ocm-sdk-go/authorizations/v1" +) + +type Authorization interface { + SelfAccessReview(ctx context.Context, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) + AccessReview(ctx context.Context, username, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) +} + +type authorization service + +var _ Authorization = &authorization{} + +func (a authorization) SelfAccessReview(ctx context.Context, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) { + con := a.client.connection + selfAccessReview := con.Authorizations().V1().SelfAccessReview() + + request, err := azv1.NewSelfAccessReviewRequest(). + Action(action). + ResourceType(resourceType). + OrganizationID(organizationID). + ClusterID(clusterID). + SubscriptionID(subscriptionID). + Build() + if err != nil { + return false, err + } + + postResp, err := selfAccessReview.Post(). + Request(request). + SendContext(ctx) + if err != nil { + return false, err + } + response, ok := postResp.GetResponse() + if !ok { + return false, fmt.Errorf("empty response from authorization post request") + } + + return response.Allowed(), nil +} + +func (a authorization) AccessReview(ctx context.Context, username, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) { + con := a.client.connection + accessReview := con.Authorizations().V1().AccessReview() + + request, err := azv1.NewAccessReviewRequest(). + AccountUsername(username). + Action(action). + ResourceType(resourceType). + OrganizationID(organizationID). + ClusterID(clusterID). + SubscriptionID(subscriptionID). + Build() + if err != nil { + return false, err + } + + postResp, err := accessReview.Post(). + Request(request). + SendContext(ctx) + if err != nil { + return false, err + } + + response, ok := postResp.GetResponse() + if !ok { + return false, fmt.Errorf("empty response from authorization post request") + } + + return response.Allowed(), nil +} diff --git a/pkg/client/ocm/authorization_mock.go b/pkg/client/ocm/authorization_mock.go new file mode 100755 index 0000000..e5ee369 --- /dev/null +++ b/pkg/client/ocm/authorization_mock.go @@ -0,0 +1,18 @@ +package ocm + +import ( + "context" +) + +// authorizationMock returns allowed=true for every request +type authorizationMock service + +var _ Authorization = &authorizationMock{} + +func (a authorizationMock) SelfAccessReview(ctx context.Context, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) { + return true, nil +} + +func (a authorizationMock) AccessReview(ctx context.Context, username, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) { + return true, nil +} diff --git a/pkg/client/ocm/client.go b/pkg/client/ocm/client.go new file mode 100755 index 0000000..bcdc895 --- /dev/null +++ b/pkg/client/ocm/client.go @@ -0,0 +1,86 @@ +package ocm + +import ( + "fmt" + + sdkClient "github.com/openshift-online/ocm-sdk-go" +) + +type Client struct { + config *Config + logger sdkClient.Logger + connection *sdkClient.Connection + + Authorization Authorization +} + +type Config struct { + BaseURL string + ClientID string + ClientSecret string + SelfToken string + TokenURL string + Debug bool +} + +func NewClient(config Config) (*Client, error) { + // Create a logger that has the debug level enabled: + logger, err := sdkClient.NewGoLoggerBuilder(). + Debug(config.Debug). + Build() + if err != nil { + return nil, fmt.Errorf("unable to build OCM logger: %s", err.Error()) + } + + client := &Client{ + config: &config, + logger: logger, + } + err = client.newConnection() + if err != nil { + return nil, fmt.Errorf("unable to build OCM connection: %s", err.Error()) + } + client.Authorization = &authorization{client: client} + return client, nil +} + +func NewClientMock(config Config) (*Client, error) { + client := &Client{ + config: &config, + } + client.Authorization = &authorizationMock{client: client} + return client, nil +} + +func (c *Client) newConnection() error { + builder := sdkClient.NewConnectionBuilder(). + Logger(c.logger). + URL(c.config.BaseURL). + MetricsSubsystem("api_outbound") + + if c.config.ClientID != "" && c.config.ClientSecret != "" { + builder = builder.Client(c.config.ClientID, c.config.ClientSecret) + } else if c.config.SelfToken != "" { + builder = builder.Tokens(c.config.SelfToken) + } else { + return fmt.Errorf("can't build OCM client connection: no Client/Secret or Token has been provided") + } + + connection, err := builder.Build() + + if err != nil { + return fmt.Errorf("can't build OCM client connection: %s", err.Error()) + } + c.connection = connection + return nil +} + +func (c *Client) Close() { + if c.connection != nil { + c.connection.Close() + } +} + +type service struct { + client *Client +} diff --git a/pkg/config/config.go b/pkg/config/config.go new file mode 100755 index 0000000..f7c44ff --- /dev/null +++ b/pkg/config/config.go @@ -0,0 +1,128 @@ +package config + +import ( + "flag" + "fmt" + "os" + "path/filepath" + "runtime" + "strconv" + "strings" + + "github.com/spf13/pflag" +) + +type ApplicationConfig struct { + Server *ServerConfig `json:"server"` + Metrics *MetricsConfig `json:"metrics"` + HealthCheck *HealthCheckConfig `json:"health_check"` + Database *DatabaseConfig `json:"database"` + OCM *OCMConfig `json:"ocm"` +} + +func NewApplicationConfig() *ApplicationConfig { + return &ApplicationConfig{ + Server: NewServerConfig(), + Metrics: NewMetricsConfig(), + HealthCheck: NewHealthCheckConfig(), + Database: NewDatabaseConfig(), + OCM: NewOCMConfig(), + } +} + +func (c *ApplicationConfig) AddFlags(flagset *pflag.FlagSet) { + flagset.AddGoFlagSet(flag.CommandLine) + c.Server.AddFlags(flagset) + c.Metrics.AddFlags(flagset) + c.HealthCheck.AddFlags(flagset) + c.Database.AddFlags(flagset) + c.OCM.AddFlags(flagset) +} + +func (c *ApplicationConfig) ReadFiles() []string { + readFiles := []struct { + f func() error + name string + }{ + {c.Server.ReadFiles, "Server"}, + {c.Database.ReadFiles, "Database"}, + {c.OCM.ReadFiles, "OCM"}, + {c.Metrics.ReadFiles, "Metrics"}, + {c.HealthCheck.ReadFiles, "HealthCheck"}, + } + var messages []string + for _, rf := range readFiles { + if err := rf.f(); err != nil { + msg := fmt.Sprintf("%s %s", rf.name, err.Error()) + messages = append(messages, msg) + } + } + return messages +} + +// Read the contents of file into integer value +func readFileValueInt(file string, val *int) error { + fileContents, err := ReadFile(file) + if err != nil { + return err + } + + *val, err = strconv.Atoi(fileContents) + return err +} + +// Read the contents of file into string value +func readFileValueString(file string, val *string) error { + fileContents, err := ReadFile(file) + if err != nil { + return err + } + + *val = strings.TrimSuffix(fileContents, "\n") + return err +} + +// Read the contents of file into boolean value +func readFileValueBool(file string, val *bool) error { + fileContents, err := ReadFile(file) + if err != nil { + return err + } + + *val, err = strconv.ParseBool(fileContents) + return err +} + +func ReadFile(file string) (string, error) { + // If the value is in quotes, unquote it + unquotedFile, err := strconv.Unquote(file) + if err != nil { + // values without quotes will raise an error, ignore it. + unquotedFile = file + } + + // If no file is provided, leave val unchanged. + if unquotedFile == "" { + return "", nil + } + + // Ensure the absolute file path is used + absFilePath := unquotedFile + if !filepath.IsAbs(unquotedFile) { + absFilePath = filepath.Join(GetProjectRootDir(), unquotedFile) + } + + // Read the file + buf, err := os.ReadFile(absFilePath) + if err != nil { + return "", err + } + return string(buf), nil +} + +// GetProjectRootDir Return project root path based on the relative path of this file +func GetProjectRootDir() string { + _, b, _, _ := runtime.Caller(0) + basepath := filepath.Dir(filepath.Join(b, "..", "..")) + return basepath +} diff --git a/pkg/config/config_test.go b/pkg/config/config_test.go new file mode 100755 index 0000000..453f2ab --- /dev/null +++ b/pkg/config/config_test.go @@ -0,0 +1,80 @@ +package config + +import ( + "log" + "os" + "testing" + + . "github.com/onsi/gomega" +) + +func TestConfigReadStringFile(t *testing.T) { + RegisterTestingT(t) + + stringFile, err := createConfigFile("string", "example\n") + defer os.Remove(stringFile.Name()) + if err != nil { + log.Fatal(err) + } + + var stringConfig string + err = readFileValueString(stringFile.Name(), &stringConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(stringConfig).To(Equal("example")) +} + +func TestConfigReadIntFile(t *testing.T) { + RegisterTestingT(t) + + intFile, err := createConfigFile("int", "123") + defer os.Remove(intFile.Name()) + if err != nil { + log.Fatal(err) + } + + var intConfig int + err = readFileValueInt(intFile.Name(), &intConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(intConfig).To(Equal(123)) +} + +func TestConfigReadBoolFile(t *testing.T) { + RegisterTestingT(t) + + boolFile, err := createConfigFile("bool", "true") + defer os.Remove(boolFile.Name()) + if err != nil { + log.Fatal(err) + } + + var boolConfig = false + err = readFileValueBool(boolFile.Name(), &boolConfig) + Expect(err).NotTo(HaveOccurred()) + Expect(boolConfig).To(Equal(true)) +} + +func TestConfigReadQuotedFile(t *testing.T) { + RegisterTestingT(t) + + stringFile, err := createConfigFile("string", "example") + defer os.Remove(stringFile.Name()) + if err != nil { + log.Fatal(err) + } + + quotedFileName := "\"" + stringFile.Name() + "\"" + val, err := ReadFile(quotedFileName) + Expect(err).NotTo(HaveOccurred()) + Expect(val).To(Equal("example")) +} +func createConfigFile(namePrefix, contents string) (*os.File, error) { + configFile, err := os.CreateTemp("", namePrefix) + if err != nil { + return nil, err + } + if _, err = configFile.Write([]byte(contents)); err != nil { + return configFile, err + } + err = configFile.Close() + return configFile, err +} diff --git a/pkg/config/db.go b/pkg/config/db.go new file mode 100755 index 0000000..65c95ae --- /dev/null +++ b/pkg/config/db.go @@ -0,0 +1,119 @@ +package config + +import ( + "fmt" + + "github.com/spf13/pflag" +) + +type DatabaseConfig struct { + Dialect string `json:"dialect"` + SSLMode string `json:"sslmode"` + Debug bool `json:"debug"` + MaxOpenConnections int `json:"max_connections"` + + Host string `json:"host"` + Port int `json:"port"` + Name string `json:"name"` + Username string `json:"username"` + Password string `json:"password"` + + HostFile string `json:"host_file"` + PortFile string `json:"port_file"` + NameFile string `json:"name_file"` + UsernameFile string `json:"username_file"` + PasswordFile string `json:"password_file"` + RootCertFile string `json:"certificate_file"` +} + +func NewDatabaseConfig() *DatabaseConfig { + return &DatabaseConfig{ + Dialect: "postgres", + SSLMode: "disable", + Debug: false, + MaxOpenConnections: 50, + + HostFile: "secrets/db.host", + PortFile: "secrets/db.port", + NameFile: "secrets/db.name", + UsernameFile: "secrets/db.user", + PasswordFile: "secrets/db.password", + RootCertFile: "secrets/db.rootcert", + } +} + +func (c *DatabaseConfig) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&c.HostFile, "db-host-file", c.HostFile, "Database host string file") + fs.StringVar(&c.PortFile, "db-port-file", c.PortFile, "Database port file") + fs.StringVar(&c.UsernameFile, "db-user-file", c.UsernameFile, "Database username file") + fs.StringVar(&c.PasswordFile, "db-password-file", c.PasswordFile, "Database password file") + fs.StringVar(&c.NameFile, "db-name-file", c.NameFile, "Database name file") + fs.StringVar(&c.RootCertFile, "db-rootcert", c.RootCertFile, "Database root certificate file") + fs.StringVar(&c.SSLMode, "db-sslmode", c.SSLMode, "Database ssl mode (disable | require | verify-ca | verify-full)") + fs.BoolVar(&c.Debug, "enable-db-debug", c.Debug, " framework's debug mode") + fs.IntVar(&c.MaxOpenConnections, "db-max-open-connections", c.MaxOpenConnections, "Maximum open DB connections for this instance") +} + +func (c *DatabaseConfig) ReadFiles() error { + err := readFileValueString(c.HostFile, &c.Host) + if err != nil { + return err + } + + err = readFileValueInt(c.PortFile, &c.Port) + if err != nil { + return err + } + + err = readFileValueString(c.UsernameFile, &c.Username) + if err != nil { + return err + } + + err = readFileValueString(c.PasswordFile, &c.Password) + if err != nil { + return err + } + + err = readFileValueString(c.NameFile, &c.Name) + return err +} + +func (c *DatabaseConfig) ConnectionString(withSSL bool) string { + return c.ConnectionStringWithName(c.Name, withSSL) +} + +func (c *DatabaseConfig) ConnectionStringWithName(name string, withSSL bool) string { + var cmd string + if withSSL { + cmd = fmt.Sprintf( + "host=%s port=%d user=%s password='%s' dbname=%s sslmode=%s sslrootcert=%s", + c.Host, c.Port, c.Username, c.Password, name, c.SSLMode, c.RootCertFile, + ) + } else { + cmd = fmt.Sprintf( + "host=%s port=%d user=%s password='%s' dbname=%s sslmode=disable", + c.Host, c.Port, c.Username, c.Password, name, + ) + } + + return cmd +} + +func (c *DatabaseConfig) LogSafeConnectionString(withSSL bool) string { + return c.LogSafeConnectionStringWithName(c.Name, withSSL) +} + +func (c *DatabaseConfig) LogSafeConnectionStringWithName(name string, withSSL bool) string { + if withSSL { + return fmt.Sprintf( + "host=%s port=%d user=%s password='' dbname=%s sslmode=%s sslrootcert=''", + c.Host, c.Port, c.Username, name, c.SSLMode, + ) + } else { + return fmt.Sprintf( + "host=%s port=%d user=%s password='' dbname=%s", + c.Host, c.Port, c.Username, name, + ) + } +} diff --git a/pkg/config/health_check.go b/pkg/config/health_check.go new file mode 100755 index 0000000..e3aaa79 --- /dev/null +++ b/pkg/config/health_check.go @@ -0,0 +1,26 @@ +package config + +import ( + "github.com/spf13/pflag" +) + +type HealthCheckConfig struct { + BindAddress string `json:"bind_address"` + EnableHTTPS bool `json:"enable_https"` +} + +func NewHealthCheckConfig() *HealthCheckConfig { + return &HealthCheckConfig{ + BindAddress: "localhost:8083", + EnableHTTPS: false, + } +} + +func (c *HealthCheckConfig) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&c.BindAddress, "health-check-server-bindaddress", c.BindAddress, "Health check server bind adddress") + fs.BoolVar(&c.EnableHTTPS, "enable-health-check-https", c.EnableHTTPS, "Enable HTTPS for health check server") +} + +func (c *HealthCheckConfig) ReadFiles() error { + return nil +} diff --git a/pkg/config/metrics.go b/pkg/config/metrics.go new file mode 100755 index 0000000..b01c3c3 --- /dev/null +++ b/pkg/config/metrics.go @@ -0,0 +1,31 @@ +package config + +import ( + "time" + + "github.com/spf13/pflag" +) + +type MetricsConfig struct { + BindAddress string `json:"bind_address"` + EnableHTTPS bool `json:"enable_https"` + LabelMetricsInclusionDuration time.Duration `json:"label_metrics_inclusion_duration"` +} + +func NewMetricsConfig() *MetricsConfig { + return &MetricsConfig{ + BindAddress: "localhost:8080", + EnableHTTPS: false, + LabelMetricsInclusionDuration: 7 * 24 * time.Hour, + } +} + +func (s *MetricsConfig) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&s.BindAddress, "metrics-server-bindaddress", s.BindAddress, "Metrics server bind adddress") + fs.BoolVar(&s.EnableHTTPS, "enable-metrics-https", s.EnableHTTPS, "Enable HTTPS for metrics server") + fs.DurationVar(&s.LabelMetricsInclusionDuration, "label-metrics-inclusion-duration", 7*24*time.Hour, "A cluster's last telemetry date needs be within in this duration in order to have labels collected") +} + +func (s *MetricsConfig) ReadFiles() error { + return nil +} diff --git a/pkg/config/ocm.go b/pkg/config/ocm.go new file mode 100755 index 0000000..1f7c6ab --- /dev/null +++ b/pkg/config/ocm.go @@ -0,0 +1,56 @@ +package config + +import ( + "github.com/spf13/pflag" +) + +type OCMConfig struct { + BaseURL string `json:"base_url"` + ClientID string `json:"client-id"` + ClientIDFile string `json:"client-id_file"` + ClientSecret string `json:"client-secret"` + ClientSecretFile string `json:"client-secret_file"` + SelfToken string `json:"self_token"` + SelfTokenFile string `json:"self_token_file"` + TokenURL string `json:"token_url"` + Debug bool `json:"debug"` + EnableMock bool `json:"enable_mock"` +} + +func NewOCMConfig() *OCMConfig { + return &OCMConfig{ + BaseURL: "https://api.integration.openshift.com", + TokenURL: "https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/token", + ClientIDFile: "secrets/ocm-service.clientId", + ClientSecretFile: "secrets/ocm-service.clientSecret", + SelfTokenFile: "", + Debug: false, + EnableMock: true, + } +} + +func (c *OCMConfig) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&c.ClientIDFile, "ocm-client-id-file", c.ClientIDFile, "File containing OCM API privileged account client-id") + fs.StringVar(&c.ClientSecretFile, "ocm-client-secret-file", c.ClientSecretFile, "File containing OCM API privileged account client-secret") + fs.StringVar(&c.SelfTokenFile, "self-token-file", c.SelfTokenFile, "File containing OCM API privileged offline SSO token") + fs.StringVar(&c.BaseURL, "ocm-base-url", c.BaseURL, "The base URL of the OCM API, integration by default") + fs.StringVar(&c.TokenURL, "ocm-token-url", c.TokenURL, "The base URL that OCM uses to request tokens, stage by default") + fs.BoolVar(&c.Debug, "ocm-debug", c.Debug, "Debug flag for OCM API") + fs.BoolVar(&c.EnableMock, "enable-ocm-mock", c.EnableMock, "Enable mock ocm clients") +} + +func (c *OCMConfig) ReadFiles() error { + if c.EnableMock { + return nil + } + err := readFileValueString(c.ClientIDFile, &c.ClientID) + if err != nil { + return err + } + err = readFileValueString(c.ClientSecretFile, &c.ClientSecret) + if err != nil { + return err + } + err = readFileValueString(c.SelfTokenFile, &c.SelfToken) + return err +} diff --git a/pkg/config/server.go b/pkg/config/server.go new file mode 100755 index 0000000..9258fd8 --- /dev/null +++ b/pkg/config/server.go @@ -0,0 +1,58 @@ +package config + +import ( + "time" + + "github.com/spf13/pflag" +) + +type ServerConfig struct { + Hostname string `json:"hostname"` + BindAddress string `json:"bind_address"` + ReadTimeout time.Duration `json:"read_timeout"` + WriteTimeout time.Duration `json:"write_timeout"` + HTTPSCertFile string `json:"https_cert_file"` + HTTPSKeyFile string `json:"https_key_file"` + EnableHTTPS bool `json:"enable_https"` + EnableJWT bool `json:"enable_jwt"` + EnableAuthz bool `json:"enable_authz"` + JwkCertFile string `json:"jwk_cert_file"` + JwkCertURL string `json:"jwk_cert_url"` + ACLFile string `json:"acl_file"` +} + +func NewServerConfig() *ServerConfig { + return &ServerConfig{ + Hostname: "", + BindAddress: "localhost:8000", + ReadTimeout: 5 * time.Second, + WriteTimeout: 30 * time.Second, + EnableHTTPS: false, + EnableJWT: true, + EnableAuthz: true, + JwkCertFile: "", + JwkCertURL: "https://sso.redhat.com/auth/realms/redhat-external/protocol/openid-connect/certs", + ACLFile: "", + HTTPSCertFile: "", + HTTPSKeyFile: "", + } +} + +func (s *ServerConfig) AddFlags(fs *pflag.FlagSet) { + fs.StringVar(&s.BindAddress, "api-server-bindaddress", s.BindAddress, "API server bind adddress") + fs.StringVar(&s.Hostname, "api-server-hostname", s.Hostname, "Server's public hostname") + fs.DurationVar(&s.ReadTimeout, "http-read-timeout", s.ReadTimeout, "HTTP server read timeout") + fs.DurationVar(&s.WriteTimeout, "http-write-timeout", s.WriteTimeout, "HTTP server write timeout") + fs.StringVar(&s.HTTPSCertFile, "https-cert-file", s.HTTPSCertFile, "The path to the tls.crt file.") + fs.StringVar(&s.HTTPSKeyFile, "https-key-file", s.HTTPSKeyFile, "The path to the tls.key file.") + fs.BoolVar(&s.EnableHTTPS, "enable-https", s.EnableHTTPS, "Enable HTTPS rather than HTTP") + fs.BoolVar(&s.EnableJWT, "enable-jwt", s.EnableJWT, "Enable JWT authentication validation") + fs.BoolVar(&s.EnableAuthz, "enable-authz", s.EnableAuthz, "Enable Authorization on endpoints, should only be disabled for debug") + fs.StringVar(&s.JwkCertFile, "jwk-cert-file", s.JwkCertFile, "JWK Certificate file") + fs.StringVar(&s.JwkCertURL, "jwk-cert-url", s.JwkCertURL, "JWK Certificate URL") + fs.StringVar(&s.ACLFile, "acl-file", s.ACLFile, "Access control list file") +} + +func (s *ServerConfig) ReadFiles() error { + return nil +} diff --git a/pkg/dao/adapter_status.go b/pkg/dao/adapter_status.go new file mode 100644 index 0000000..30f07f8 --- /dev/null +++ b/pkg/dao/adapter_status.go @@ -0,0 +1,115 @@ +package dao + +import ( + "context" + + "gorm.io/gorm/clause" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" +) + +type AdapterStatusDao interface { + Get(ctx context.Context, id string) (*api.AdapterStatus, error) + Create(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, error) + Replace(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, error) + Delete(ctx context.Context, id string) error + FindByResource(ctx context.Context, resourceType, resourceID string) (api.AdapterStatusList, error) + FindByResourcePaginated(ctx context.Context, resourceType, resourceID string, offset, limit int) (api.AdapterStatusList, int64, error) + FindByResourceAndAdapter(ctx context.Context, resourceType, resourceID, adapter string) (*api.AdapterStatus, error) + All(ctx context.Context) (api.AdapterStatusList, error) +} + +var _ AdapterStatusDao = &sqlAdapterStatusDao{} + +type sqlAdapterStatusDao struct { + sessionFactory *db.SessionFactory +} + +func NewAdapterStatusDao(sessionFactory *db.SessionFactory) AdapterStatusDao { + return &sqlAdapterStatusDao{sessionFactory: sessionFactory} +} + +func (d *sqlAdapterStatusDao) Get(ctx context.Context, id string) (*api.AdapterStatus, error) { + g2 := (*d.sessionFactory).New(ctx) + var adapterStatus api.AdapterStatus + if err := g2.Take(&adapterStatus, "id = ?", id).Error; err != nil { + return nil, err + } + return &adapterStatus, nil +} + +func (d *sqlAdapterStatusDao) Create(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, error) { + g2 := (*d.sessionFactory).New(ctx) + if err := g2.Omit(clause.Associations).Create(adapterStatus).Error; err != nil { + db.MarkForRollback(ctx, err) + return nil, err + } + return adapterStatus, nil +} + +func (d *sqlAdapterStatusDao) Replace(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, error) { + g2 := (*d.sessionFactory).New(ctx) + if err := g2.Omit(clause.Associations).Save(adapterStatus).Error; err != nil { + db.MarkForRollback(ctx, err) + return nil, err + } + return adapterStatus, nil +} + +func (d *sqlAdapterStatusDao) Delete(ctx context.Context, id string) error { + g2 := (*d.sessionFactory).New(ctx) + if err := g2.Omit(clause.Associations).Delete(&api.AdapterStatus{Meta: api.Meta{ID: id}}).Error; err != nil { + db.MarkForRollback(ctx, err) + return err + } + return nil +} + +func (d *sqlAdapterStatusDao) FindByResource(ctx context.Context, resourceType, resourceID string) (api.AdapterStatusList, error) { + g2 := (*d.sessionFactory).New(ctx) + statuses := api.AdapterStatusList{} + if err := g2.Where("resource_type = ? AND resource_id = ?", resourceType, resourceID).Find(&statuses).Error; err != nil { + return nil, err + } + return statuses, nil +} + +func (d *sqlAdapterStatusDao) FindByResourcePaginated(ctx context.Context, resourceType, resourceID string, offset, limit int) (api.AdapterStatusList, int64, error) { + g2 := (*d.sessionFactory).New(ctx) + statuses := api.AdapterStatusList{} + var total int64 + + // Base query + query := g2.Where("resource_type = ? AND resource_id = ?", resourceType, resourceID) + + // Get total count for pagination metadata + if err := query.Model(&api.AdapterStatus{}).Count(&total).Error; err != nil { + return nil, 0, err + } + + // Apply pagination using OFFSET and LIMIT + if err := query.Offset(offset).Limit(limit).Find(&statuses).Error; err != nil { + return nil, 0, err + } + + return statuses, total, nil +} + +func (d *sqlAdapterStatusDao) FindByResourceAndAdapter(ctx context.Context, resourceType, resourceID, adapter string) (*api.AdapterStatus, error) { + g2 := (*d.sessionFactory).New(ctx) + var adapterStatus api.AdapterStatus + if err := g2.Where("resource_type = ? AND resource_id = ? AND adapter = ?", resourceType, resourceID, adapter).Take(&adapterStatus).Error; err != nil { + return nil, err + } + return &adapterStatus, nil +} + +func (d *sqlAdapterStatusDao) All(ctx context.Context) (api.AdapterStatusList, error) { + g2 := (*d.sessionFactory).New(ctx) + statuses := api.AdapterStatusList{} + if err := g2.Find(&statuses).Error; err != nil { + return nil, err + } + return statuses, nil +} diff --git a/pkg/dao/cluster.go b/pkg/dao/cluster.go new file mode 100644 index 0000000..ecbb4dd --- /dev/null +++ b/pkg/dao/cluster.go @@ -0,0 +1,83 @@ +package dao + +import ( + "context" + + "gorm.io/gorm/clause" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" +) + +type ClusterDao interface { + Get(ctx context.Context, id string) (*api.Cluster, error) + Create(ctx context.Context, cluster *api.Cluster) (*api.Cluster, error) + Replace(ctx context.Context, cluster *api.Cluster) (*api.Cluster, error) + Delete(ctx context.Context, id string) error + FindByIDs(ctx context.Context, ids []string) (api.ClusterList, error) + All(ctx context.Context) (api.ClusterList, error) +} + +var _ ClusterDao = &sqlClusterDao{} + +type sqlClusterDao struct { + sessionFactory *db.SessionFactory +} + +func NewClusterDao(sessionFactory *db.SessionFactory) ClusterDao { + return &sqlClusterDao{sessionFactory: sessionFactory} +} + +func (d *sqlClusterDao) Get(ctx context.Context, id string) (*api.Cluster, error) { + g2 := (*d.sessionFactory).New(ctx) + var cluster api.Cluster + if err := g2.Take(&cluster, "id = ?", id).Error; err != nil { + return nil, err + } + return &cluster, nil +} + +func (d *sqlClusterDao) Create(ctx context.Context, cluster *api.Cluster) (*api.Cluster, error) { + g2 := (*d.sessionFactory).New(ctx) + if err := g2.Omit(clause.Associations).Create(cluster).Error; err != nil { + db.MarkForRollback(ctx, err) + return nil, err + } + return cluster, nil +} + +func (d *sqlClusterDao) Replace(ctx context.Context, cluster *api.Cluster) (*api.Cluster, error) { + g2 := (*d.sessionFactory).New(ctx) + if err := g2.Omit(clause.Associations).Save(cluster).Error; err != nil { + db.MarkForRollback(ctx, err) + return nil, err + } + return cluster, nil +} + +func (d *sqlClusterDao) Delete(ctx context.Context, id string) error { + g2 := (*d.sessionFactory).New(ctx) + if err := g2.Omit(clause.Associations).Delete(&api.Cluster{Meta: api.Meta{ID: id}}).Error; err != nil { + db.MarkForRollback(ctx, err) + return err + } + return nil +} + +func (d *sqlClusterDao) FindByIDs(ctx context.Context, ids []string) (api.ClusterList, error) { + g2 := (*d.sessionFactory).New(ctx) + clusters := api.ClusterList{} + if err := g2.Where("id in (?)", ids).Find(&clusters).Error; err != nil { + return nil, err + } + return clusters, nil +} + +func (d *sqlClusterDao) All(ctx context.Context) (api.ClusterList, error) { + g2 := (*d.sessionFactory).New(ctx) + clusters := api.ClusterList{} + if err := g2.Find(&clusters).Error; err != nil { + return nil, err + } + return clusters, nil +} diff --git a/pkg/dao/generic.go b/pkg/dao/generic.go new file mode 100755 index 0000000..1038f3f --- /dev/null +++ b/pkg/dao/generic.go @@ -0,0 +1,152 @@ +package dao + +import ( + "context" + "strings" + + "github.com/jinzhu/inflection" + "gorm.io/gorm" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" +) + +type Where struct { + sql string + values []any +} + +func NewWhere(sql string, values []any) Where { + return Where{ + sql: sql, + values: values, + } +} + +type GenericDao interface { + Fetch(offset int, limit int, resourceList interface{}) error + + GetInstanceDao(ctx context.Context, model interface{}) GenericDao + Preload(preload string) + OrderBy(orderBy string) + Joins(sql string) + Group(sql string) + Where(where Where) + Count(model interface{}, total *int64) + Validate(resourceList interface{}) error + + GetTableName() string + GetTableRelation(fieldName string) (TableRelation, bool) +} + +var _ GenericDao = &sqlGenericDao{} + +type sqlGenericDao struct { + sessionFactory *db.SessionFactory + g2 *gorm.DB +} + +// TableRelation represents a relationship between two tables. They can be joined, +// ON TableName.ColumnName = ForeignTableName.ForeignColumnName +type TableRelation struct { + TableName string + ColumnName string + ForeignTableName string + ForeignColumnName string +} + +func NewGenericDao(sessionFactory *db.SessionFactory) GenericDao { + return &sqlGenericDao{sessionFactory: sessionFactory} +} + +func (d *sqlGenericDao) GetInstanceDao(ctx context.Context, model interface{}) GenericDao { + return &sqlGenericDao{ + sessionFactory: d.sessionFactory, + g2: (*d.sessionFactory).New(ctx).Model(model), + } +} + +func (d *sqlGenericDao) Fetch(offset int, limit int, resourceList interface{}) error { + return d.g2.Debug().Offset(offset).Limit(limit).Find(resourceList).Error +} + +func (d *sqlGenericDao) Preload(preload string) { + d.g2 = d.g2.Preload(preload) +} + +func (d *sqlGenericDao) OrderBy(orderBy string) { + d.g2 = d.g2.Order(orderBy) +} + +func (d *sqlGenericDao) Joins(sql string) { + d.g2 = d.g2.Joins(sql) +} + +func (d *sqlGenericDao) Group(sql string) { + d.g2 = d.g2.Group(sql) +} + +func (d *sqlGenericDao) Where(where Where) { + d.g2 = d.g2.Where(where.sql, where.values...) +} + +func (d *sqlGenericDao) Count(model interface{}, total *int64) { + // Creates new session which already clears all statement clauses + g2 := d.g2.Session(&gorm.Session{DryRun: false}).Model(model) + // Considers existing joins and search params from previous session + if len(d.g2.Statement.Joins) > 0 { + g2.Statement.Joins = d.g2.Statement.Joins + } + if where, ok := d.g2.Statement.Clauses["WHERE"]; ok { + g2.Statement.Clauses["WHERE"] = where + } + g2.Count(total) +} + +// Gorm finishers (Take, First, Last, etc.) are not idempotent +// Use a new session to execute these checks +func (d *sqlGenericDao) Validate(resourceList interface{}) error { + if err := d.g2.Session(&gorm.Session{DryRun: false}).Take(resourceList).Error; err != nil { + return err + } + return nil +} + +func (d *sqlGenericDao) GetTableName() string { + return db.GetTableName(d.g2) +} + +// extract the relation from the api model +func (d *sqlGenericDao) GetTableRelation(fieldName string) (TableRelation, bool) { + // try singular + fieldName = strings.ToUpper(fieldName[:1]) + fieldName[1:] + table := inflection.Singular(fieldName) + association := d.g2.Association(table) + // the relation must exist in the model + if association.Relationship == nil { + // try plural + table = inflection.Plural(fieldName) + association = d.g2.Association(table) + if association.Relationship == nil { + return TableRelation{}, false + } + } + + if association.Relationship.Type != "belongs_to" && association.Relationship.Type != "has_many" { + // we don't use has_one or many_to_many relations + return TableRelation{}, false + } + + columnName := association.Relationship.References[0].ForeignKey.DBName + foreignColumnName := association.Relationship.References[0].PrimaryKey.DBName + if association.Relationship.Type == "has_many" { + columnName = association.Relationship.References[0].PrimaryKey.DBName + foreignColumnName = association.Relationship.References[0].ForeignKey.DBName + } + + return TableRelation{ + TableName: association.Relationship.Field.Schema.Table, + ForeignTableName: association.Relationship.FieldSchema.Table, + ForeignColumnName: foreignColumnName, + ColumnName: columnName, + }, true +} diff --git a/pkg/dao/mocks/cluster.go b/pkg/dao/mocks/cluster.go new file mode 100644 index 0000000..2836008 --- /dev/null +++ b/pkg/dao/mocks/cluster.go @@ -0,0 +1,51 @@ +package mocks + +import ( + "context" + + "gorm.io/gorm" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/dao" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +var _ dao.ClusterDao = &clusterDaoMock{} + +type clusterDaoMock struct { + clusters api.ClusterList +} + +func NewClusterDao() *clusterDaoMock { + return &clusterDaoMock{} +} + +func (d *clusterDaoMock) Get(ctx context.Context, id string) (*api.Cluster, error) { + for _, cluster := range d.clusters { + if cluster.ID == id { + return cluster, nil + } + } + return nil, gorm.ErrRecordNotFound +} + +func (d *clusterDaoMock) Create(ctx context.Context, cluster *api.Cluster) (*api.Cluster, error) { + d.clusters = append(d.clusters, cluster) + return cluster, nil +} + +func (d *clusterDaoMock) Replace(ctx context.Context, cluster *api.Cluster) (*api.Cluster, error) { + return nil, errors.NotImplemented("Cluster").AsError() +} + +func (d *clusterDaoMock) Delete(ctx context.Context, id string) error { + return errors.NotImplemented("Cluster").AsError() +} + +func (d *clusterDaoMock) FindByIDs(ctx context.Context, ids []string) (api.ClusterList, error) { + return nil, errors.NotImplemented("Cluster").AsError() +} + +func (d *clusterDaoMock) All(ctx context.Context) (api.ClusterList, error) { + return d.clusters, nil +} diff --git a/pkg/dao/mocks/generic.go b/pkg/dao/mocks/generic.go new file mode 100755 index 0000000..a6ff716 --- /dev/null +++ b/pkg/dao/mocks/generic.go @@ -0,0 +1,76 @@ +package mocks + +import ( + "context" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/dao" +) + +var _ dao.GenericDao = &genericDaoMock{} + +type genericDaoMock struct { + preload string + orderBy string + joins string + group string + wheres []dao.Where + model interface{} +} + +func NewGenericDao() *genericDaoMock { + return &genericDaoMock{ + wheres: []dao.Where{}, + } +} + +func (g *genericDaoMock) Fetch(offset int, limit int, resourceList interface{}) error { + // Mock implementation - does nothing but returns no error + return nil +} + +func (g *genericDaoMock) GetInstanceDao(ctx context.Context, model interface{}) dao.GenericDao { + return &genericDaoMock{ + model: model, + wheres: []dao.Where{}, + } +} + +func (g *genericDaoMock) Preload(preload string) { + g.preload = preload +} + +func (g *genericDaoMock) OrderBy(orderBy string) { + g.orderBy = orderBy +} + +func (g *genericDaoMock) Joins(sql string) { + g.joins = sql +} + +func (g *genericDaoMock) Group(sql string) { + g.group = sql +} + +func (g *genericDaoMock) Where(where dao.Where) { + g.wheres = append(g.wheres, where) +} + +func (g *genericDaoMock) Count(model interface{}, total *int64) { + // Mock implementation - sets count to 0 + *total = 0 +} + +func (g *genericDaoMock) Validate(resourceList interface{}) error { + // Mock implementation - returns no error + return nil +} + +func (g *genericDaoMock) GetTableName() string { + // Mock implementation - returns empty string + return "" +} + +func (g *genericDaoMock) GetTableRelation(fieldName string) (dao.TableRelation, bool) { + // Mock implementation - returns empty relation and false + return dao.TableRelation{}, false +} diff --git a/pkg/dao/mocks/node_pool.go b/pkg/dao/mocks/node_pool.go new file mode 100644 index 0000000..d243d52 --- /dev/null +++ b/pkg/dao/mocks/node_pool.go @@ -0,0 +1,51 @@ +package mocks + +import ( + "context" + + "gorm.io/gorm" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/dao" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +var _ dao.NodePoolDao = &nodePoolDaoMock{} + +type nodePoolDaoMock struct { + nodePools api.NodePoolList +} + +func NewNodePoolDao() *nodePoolDaoMock { + return &nodePoolDaoMock{} +} + +func (d *nodePoolDaoMock) Get(ctx context.Context, id string) (*api.NodePool, error) { + for _, nodePool := range d.nodePools { + if nodePool.ID == id { + return nodePool, nil + } + } + return nil, gorm.ErrRecordNotFound +} + +func (d *nodePoolDaoMock) Create(ctx context.Context, nodePool *api.NodePool) (*api.NodePool, error) { + d.nodePools = append(d.nodePools, nodePool) + return nodePool, nil +} + +func (d *nodePoolDaoMock) Replace(ctx context.Context, nodePool *api.NodePool) (*api.NodePool, error) { + return nil, errors.NotImplemented("NodePool").AsError() +} + +func (d *nodePoolDaoMock) Delete(ctx context.Context, id string) error { + return errors.NotImplemented("NodePool").AsError() +} + +func (d *nodePoolDaoMock) FindByIDs(ctx context.Context, ids []string) (api.NodePoolList, error) { + return nil, errors.NotImplemented("NodePool").AsError() +} + +func (d *nodePoolDaoMock) All(ctx context.Context) (api.NodePoolList, error) { + return d.nodePools, nil +} diff --git a/pkg/dao/node_pool.go b/pkg/dao/node_pool.go new file mode 100644 index 0000000..e62ddd2 --- /dev/null +++ b/pkg/dao/node_pool.go @@ -0,0 +1,83 @@ +package dao + +import ( + "context" + + "gorm.io/gorm/clause" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" +) + +type NodePoolDao interface { + Get(ctx context.Context, id string) (*api.NodePool, error) + Create(ctx context.Context, nodePool *api.NodePool) (*api.NodePool, error) + Replace(ctx context.Context, nodePool *api.NodePool) (*api.NodePool, error) + Delete(ctx context.Context, id string) error + FindByIDs(ctx context.Context, ids []string) (api.NodePoolList, error) + All(ctx context.Context) (api.NodePoolList, error) +} + +var _ NodePoolDao = &sqlNodePoolDao{} + +type sqlNodePoolDao struct { + sessionFactory *db.SessionFactory +} + +func NewNodePoolDao(sessionFactory *db.SessionFactory) NodePoolDao { + return &sqlNodePoolDao{sessionFactory: sessionFactory} +} + +func (d *sqlNodePoolDao) Get(ctx context.Context, id string) (*api.NodePool, error) { + g2 := (*d.sessionFactory).New(ctx) + var nodePool api.NodePool + if err := g2.Take(&nodePool, "id = ?", id).Error; err != nil { + return nil, err + } + return &nodePool, nil +} + +func (d *sqlNodePoolDao) Create(ctx context.Context, nodePool *api.NodePool) (*api.NodePool, error) { + g2 := (*d.sessionFactory).New(ctx) + if err := g2.Omit(clause.Associations).Create(nodePool).Error; err != nil { + db.MarkForRollback(ctx, err) + return nil, err + } + return nodePool, nil +} + +func (d *sqlNodePoolDao) Replace(ctx context.Context, nodePool *api.NodePool) (*api.NodePool, error) { + g2 := (*d.sessionFactory).New(ctx) + if err := g2.Omit(clause.Associations).Save(nodePool).Error; err != nil { + db.MarkForRollback(ctx, err) + return nil, err + } + return nodePool, nil +} + +func (d *sqlNodePoolDao) Delete(ctx context.Context, id string) error { + g2 := (*d.sessionFactory).New(ctx) + if err := g2.Omit(clause.Associations).Delete(&api.NodePool{Meta: api.Meta{ID: id}}).Error; err != nil { + db.MarkForRollback(ctx, err) + return err + } + return nil +} + +func (d *sqlNodePoolDao) FindByIDs(ctx context.Context, ids []string) (api.NodePoolList, error) { + g2 := (*d.sessionFactory).New(ctx) + nodePools := api.NodePoolList{} + if err := g2.Where("id in (?)", ids).Find(&nodePools).Error; err != nil { + return nil, err + } + return nodePools, nil +} + +func (d *sqlNodePoolDao) All(ctx context.Context) (api.NodePoolList, error) { + g2 := (*d.sessionFactory).New(ctx) + nodePools := api.NodePoolList{} + if err := g2.Find(&nodePools).Error; err != nil { + return nil, err + } + return nodePools, nil +} diff --git a/pkg/db/README.md b/pkg/db/README.md new file mode 100755 index 0000000..763802f --- /dev/null +++ b/pkg/db/README.md @@ -0,0 +1,67 @@ +# Migrations + +Database migrations are handled by this package. All migrations should be created in separate files, following a starndard naming convetion + +The `migrations.go` file defines an array of migrate functions that are called by the [gormigrate](https://gopkg.in/gormigrate.v1) helper. Each migration function should perform a specific migration. + +## Creating a new migration + +Create a migration ID based on the time using the YYYYMMDDHHMM format. Example: `August 21 2018 at 2:54pm` would be `201808211454`. + +Your migration's name should be used in the file name and in the function name and should adequately represent the actions your migration is taking. If your migration is doing too much to fit in a name, you should consider creating multiple migrations. + +Create a separate file in `pkg/db/` following the naming schema in place: `_.go`. In the file, you'll create a function that returns a [gormmigrate.Migration](https://gopkg.in/gormigrate.v1/blob/master/gormigrate.go#L37) object with `gormigrate.Migrate` and `gormigrate.Rollback` functions defined. + +Add the function you created in the separate file to the `migrations` list in `pkg/db/migrations.go`. + +If necessary, write a test to verify the migration. See `test/integration/migrations_test.go` for examples. + +## Migration Rules + +### Migration IDs + +Each migration has an ID that defines the order in which the migration is run. + +IDs are numerical timestamps that must sort ascending. Use YYYYMMDDHHMM w/ 24 hour time for format. +Example: `August 21 2018 at 2:54pm` would be `201808211454`. + +Migration IDs must be descending. If you create a migration, submit an MR, and another MR is merged before yours is able to be merged, you must update the ID to represent a date later than any previous migration. + +### Models in Migrations + +Represent modesl inline with migrations to represent the evolution of the object over time. + +For example, it is necessary to add a boolean field "hidden" to the "Account" model. This is how you would represent the account model in that migration: +```golang +type Account struct { + Model + Username string + FirstName string + LastName string + Hidden boolean +} + +err := tx.AutoMigrate(&Account{}).Error +if err != nil { +... +``` + +**DO NOT IMPORT THE API PKG**. When a migration imports the `api` pkg and uses models defined in it, the migration may work the first time it is run. The models in `pkg/api` are bound to change as the project grows. Eventually, the models could change so that the migration breaks, causing any new deployments to fail on your old shitty migration. + +### Record Deletions + +If it is necessary to delete a record in a migration, be aware of a couple caveats around deleting wth gorm: + +1. You must pass a record with an ID to `gorm.Delete(&record)`, otherwise **ALL RECORDS WILL BE DELETED** + +2. Gorm [soft deletes](http://gorm.io/docs/delete.html#Soft-Delete) by default. This means it sets the `deleted_at` field to a non-null value and any subsequent gorm calls will ignore it. If you are deleting a record that needs to be permanently deleted (like permissions), use `gorm.Unscoped().Delete`. + +See the [gorm documentation around deletions](http://gorm.io/docs/delete.html) for more information + +## Migration tests + +In most cases, it shouldn't be necessary to create a test for a migration. However, if the migration is manipulating records and poses a significant risk of completely borking up important data, a test should be written. + +Tests are difficult to write for migrations and are likely to fail one day long after the migration has already run in production. After a migration is run in production, it is safe to delete the test from the integration test suite. + +The test `helper` has a couple helpful functions for testing migrations. You can use `h.CleanDB()` to completely wipe the database clean, then `h.MigrateTo()` to migrate to a specific migration ID. You should then be able to create whatever records in the database you need to test against and finally run `h.MigrateDB()` to run your created migration and all subsequent migrations. diff --git a/pkg/db/context.go b/pkg/db/context.go new file mode 100755 index 0000000..a0d0ac6 --- /dev/null +++ b/pkg/db/context.go @@ -0,0 +1,57 @@ +package db + +import ( + "context" + + dbContext "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/db_context" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" +) + +// NewContext returns a new context with transaction stored in it. +// Upon error, the original context is still returned along with an error +func NewContext(ctx context.Context, connection SessionFactory) (context.Context, error) { + tx, err := newTransaction(ctx, connection) + if err != nil { + return ctx, err + } + + ctx = dbContext.WithTransaction(ctx, tx) + + return ctx, nil +} + +// Resolve resolves the current transaction according to the rollback flag. +func Resolve(ctx context.Context) { + log := logger.NewOCMLogger(ctx) + tx, ok := dbContext.Transaction(ctx) + if !ok { + log.Error("Could not retrieve transaction from context") + return + } + + if tx.MarkedForRollback() { + if err := tx.Rollback(); err != nil { + log.Extra("error", err.Error()).Error("Could not rollback transaction") + return + } + log.Infof("Rolled back transaction") + } else { + if err := tx.Commit(); err != nil { + // TODO: what does the user see when this occurs? seems like they will get a false positive + log.Extra("error", err.Error()).Error("Could not commit transaction") + return + } + } +} + +// MarkForRollback flags the transaction stored in the context for rollback and logs whatever error caused the rollback +func MarkForRollback(ctx context.Context, err error) { + log := logger.NewOCMLogger(ctx) + transaction, ok := dbContext.Transaction(ctx) + if !ok { + log.Error("failed to mark transaction for rollback: could not retrieve transaction from context") + return + } + transaction.SetRollbackFlag(true) + log.Infof("Marked transaction for rollback, err: %v", err) +} diff --git a/pkg/db/db_context/db_context.go b/pkg/db/db_context/db_context.go new file mode 100755 index 0000000..de19e61 --- /dev/null +++ b/pkg/db/db_context/db_context.go @@ -0,0 +1,35 @@ +// Package db_context dbContext provides a wrapper around db context handling to allow access to the db context without +// requiring importing the db package, thus avoiding cyclic imports +package db_context + +import ( + "context" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/transaction" +) + +type contextKey int + +const ( + transactionKey contextKey = iota +) + +// WithTransaction adds the transaction to the context and returns a new context +func WithTransaction(ctx context.Context, tx *transaction.Transaction) context.Context { + return context.WithValue(ctx, transactionKey, tx) +} + +// Transaction extracts the transaction value from the context +func Transaction(ctx context.Context) (tx *transaction.Transaction, ok bool) { + tx, ok = ctx.Value(transactionKey).(*transaction.Transaction) + return tx, ok +} + +// TxID Return the transaction ID from the context, if it exists. If there is no transaction, ok is false. +func TxID(ctx context.Context) (id int64, ok bool) { + tx, ok := Transaction(ctx) + if !ok { + return 0, false + } + return tx.TxID(), true +} diff --git a/pkg/db/db_session/db_session.go b/pkg/db/db_session/db_session.go new file mode 100755 index 0000000..c05baa8 --- /dev/null +++ b/pkg/db/db_session/db_session.go @@ -0,0 +1,9 @@ +package db_session + +import "sync" + +const ( + disable = "disable" +) + +var once sync.Once diff --git a/pkg/db/db_session/default.go b/pkg/db/db_session/default.go new file mode 100755 index 0000000..4abb33b --- /dev/null +++ b/pkg/db/db_session/default.go @@ -0,0 +1,166 @@ +package db_session + +import ( + "context" + "database/sql" + "fmt" + "time" + + "gorm.io/driver/postgres" + "gorm.io/gorm" + "gorm.io/gorm/logger" + + "github.com/lib/pq" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" + ocmlogger "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" +) + +type Default struct { + config *config.DatabaseConfig + + g2 *gorm.DB + // Direct database connection. + // It is used: + // - to setup/close connection because GORM V2 removed gorm.Close() + // - to work with pq.CopyIn because connection returned by GORM V2 gorm.DB() in "not the same" + db *sql.DB +} + +var _ db.SessionFactory = &Default{} + +func NewProdFactory(config *config.DatabaseConfig) *Default { + conn := &Default{} + conn.Init(config) + return conn +} + +// Init will initialize a singleton connection as needed and return the same instance. +// Go includes database connection pooling in the platform. Gorm uses the same and provides a method to +// clone a connection via New(), which is safe for use by concurrent Goroutines. +func (f *Default) Init(config *config.DatabaseConfig) { + // Only the first time + once.Do(func() { + var ( + dbx *sql.DB + g2 *gorm.DB + err error + ) + + // Open connection to DB via standard library + dbx, err = sql.Open(config.Dialect, config.ConnectionString(config.SSLMode != disable)) + if err != nil { + dbx, err = sql.Open(config.Dialect, config.ConnectionString(false)) + if err != nil { + panic(fmt.Sprintf( + "SQL failed to connect to %s database %s with connection string: %s\nError: %s", + config.Dialect, + config.Name, + config.LogSafeConnectionString(config.SSLMode != disable), + err.Error(), + )) + } + } + dbx.SetMaxOpenConns(config.MaxOpenConnections) + + // Connect GORM to use the same connection + conf := &gorm.Config{ + PrepareStmt: false, + FullSaveAssociations: false, + } + g2, err = gorm.Open(postgres.New(postgres.Config{ + Conn: dbx, + // Disable implicit prepared statement usage (GORM V2 uses pgx as database/sql driver and it enables prepared + /// statement cache by default) + // In migrations we both change tables' structure and running SQLs to modify data. + // This way all prepared statements becomes invalid. + PreferSimpleProtocol: true, + }), conf) + if err != nil { + panic(fmt.Sprintf( + "GORM failed to connect to %s database %s with connection string: %s\nError: %s", + config.Dialect, + config.Name, + config.LogSafeConnectionString(config.SSLMode != disable), + err.Error(), + )) + } + + f.config = config + f.g2 = g2 + f.db = dbx + }) +} + +func (f *Default) DirectDB() *sql.DB { + return f.db +} + +func waitForNotification(l *pq.Listener, callback func(id string)) { + logger := ocmlogger.NewOCMLogger(context.Background()) + for { + select { + case n := <-l.Notify: + logger.Infof("Received data from channel [%s] : %s", n.Channel, n.Extra) + callback(n.Extra) + return + case <-time.After(10 * time.Second): + logger.V(10).Infof("Received no events on channel during interval. Pinging source") + go func() { + l.Ping() + }() + return + } + } +} + +func newListener(ctx context.Context, connstr, channel string, callback func(id string)) { + logger := ocmlogger.NewOCMLogger(ctx) + + plog := func(ev pq.ListenerEventType, err error) { + if err != nil { + logger.Error(err.Error()) + } + } + listener := pq.NewListener(connstr, 10*time.Second, time.Minute, plog) + err := listener.Listen(channel) + if err != nil { + panic(err) + } + + logger.Infof("Starting channeling monitor for %s", channel) + for { + waitForNotification(listener, callback) + } +} + +func (f *Default) NewListener(ctx context.Context, channel string, callback func(id string)) { + newListener(ctx, f.config.ConnectionString(true), channel, callback) +} + +func (f *Default) New(ctx context.Context) *gorm.DB { + conn := f.g2.Session(&gorm.Session{ + Context: ctx, + Logger: f.g2.Logger.LogMode(logger.Silent), + }) + if f.config.Debug { + conn = conn.Debug() + } + return conn +} + +func (f *Default) CheckConnection() error { + return f.g2.Exec("SELECT 1").Error +} + +// Close will close the connection to the database. +// THIS MUST **NOT** BE CALLED UNTIL THE SERVER/PROCESS IS EXITING!! +// This should only ever be called once for the entire duration of the application and only at the end. +func (f *Default) Close() error { + return f.db.Close() +} + +func (f *Default) ResetDB() { + panic("ResetDB is not implemented for non-integration-test env") +} diff --git a/pkg/db/db_session/test.go b/pkg/db/db_session/test.go new file mode 100755 index 0000000..3298806 --- /dev/null +++ b/pkg/db/db_session/test.go @@ -0,0 +1,223 @@ +package db_session + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/golang/glog" + "github.com/lib/pq" + + "gorm.io/driver/postgres" + "gorm.io/gorm" + "gorm.io/gorm/logger" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" +) + +type Test struct { + config *config.DatabaseConfig + g2 *gorm.DB + // Direct database connection. + // It is used: + // - to setup/close connection because GORM V2 removed gorm.Close() + // - to work with pq.CopyIn because connection returned by GORM V2 gorm.DB() in "not the same" + db *sql.DB + + wasDisconnected bool +} + +var _ db.SessionFactory = &Test{} + +func NewTestFactory(config *config.DatabaseConfig) *Test { + conn := &Test{} + conn.Init(config) + return conn +} + +// The approach: +// Every new Postgres database is implicitly copied from a template database `template1`. Any changes to template1 +// are then copied to a new database, and this copy is a cheap filesystem operation. + +// Init will: +// - initialize a template1 DB with migrations +// - rebuild AMS DB from template1 +// - return a new connection factory +// Go includes database connection pooling in the platform. Gorm uses the same and provides a method to +// clone a connection via New(), which is safe for use by concurrent Goroutines. +func (f *Test) Init(config *config.DatabaseConfig) { + // Only the first time + once.Do(func() { + if err := initDatabase(config, db.Migrate); err != nil { + glog.Errorf("error initializing test database: %s", err) + return + } + + if err := resetDB(config); err != nil { + glog.Errorf("error resetting test database: %s", err) + return + } + }) + + f.config = config + f.db, f.g2 = connectFactory(config) +} + +func initDatabase(config *config.DatabaseConfig, migrate func(db2 *gorm.DB) error) error { + // - Connect to `template1` DB + dbx, g2, cleanup := connect("template1", config) + defer cleanup() + + for _, err := dbx.Exec(`select 1`); err != nil; { + time.Sleep(100 * time.Millisecond) + } + + // - Run migrations + return migrate(g2) +} + +func resetDB(config *config.DatabaseConfig) error { + // Reconnect to the default `postgres` database, so we can drop the existing db and recreate it + dbx, _, cleanup := connect("postgres", config) + defer cleanup() + + // Drop `all` connections to both `template1` and AMS DB, so it can be dropped and created + if err := dropConnections(dbx, "template1"); err != nil { + return err + } + if err := dropConnections(dbx, config.Name); err != nil { + return err + } + + // Rebuild AMS DB + query := fmt.Sprintf("DROP DATABASE IF EXISTS %s", pq.QuoteIdentifier(config.Name)) + if _, err := dbx.Exec(query); err != nil { + return fmt.Errorf("SQL failed to DROP database %s: %s", config.Name, err.Error()) + } + query = fmt.Sprintf("CREATE DATABASE %s TEMPLATE template1", pq.QuoteIdentifier(config.Name)) + if _, err := dbx.Exec(query); err != nil { + return fmt.Errorf("SQL failed to CREATE database %s: %s", config.Name, err.Error()) + } + // As `template1` had all migrations, so now AMS DB has them too! + return nil +} + +// connect to database specified by `name` and return connections + cleanup function +func connect(name string, config *config.DatabaseConfig) (*sql.DB, *gorm.DB, func()) { + var ( + dbx *sql.DB + g2 *gorm.DB + err error + ) + + dbx, err = sql.Open(config.Dialect, config.ConnectionStringWithName(name, config.SSLMode != disable)) + if err != nil { + dbx, err = sql.Open(config.Dialect, config.ConnectionStringWithName(name, false)) + if err != nil { + panic(fmt.Sprintf( + "SQL failed to connect to %s database %s with connection string: %s\nError: %s", + config.Dialect, + name, + config.LogSafeConnectionStringWithName(name, config.SSLMode != disable), + err.Error(), + )) + } + } + + // Connect GORM to use the same connection + conf := &gorm.Config{ + PrepareStmt: false, + FullSaveAssociations: false, + SkipDefaultTransaction: true, + Logger: logger.Default.LogMode(logger.Silent), + } + g2, err = gorm.Open(postgres.New(postgres.Config{ + Conn: dbx, + // Disable implicit prepared statement usage (GORM V2 uses pgx as database/sql driver and it enables prepared + // statement cache by default) + // In migrations we both change tables' structure and running SQLs to modify data. + // This way all prepared statements becomes invalid. + PreferSimpleProtocol: true, + }), conf) + if err != nil { + panic(fmt.Sprintf( + "GORM failed to connect to %s database %s with connection string: %s\nError: %s", + config.Dialect, + config.Name, + config.LogSafeConnectionString(config.SSLMode != disable), + err.Error(), + )) + } + + return dbx, g2, func() { + if err := dbx.Close(); err != nil { + panic(err) + } + } +} + +// KILL all connections to the specified DB +func dropConnections(dbx *sql.DB, name string) error { + query := ` + select pg_terminate_backend(pg_stat_activity.pid) + from pg_stat_activity + where pg_stat_activity.datname = $1 and pid <> pg_backend_pid()` + _, err := dbx.Exec(query, name) + if err != nil { + return err + } + return nil +} + +func connectFactory(config *config.DatabaseConfig) (*sql.DB, *gorm.DB) { + var ( + dbx *sql.DB + g2 *gorm.DB + ) + dbx, g2, _ = connect(config.Name, config) + dbx.SetMaxOpenConns(config.MaxOpenConnections) + + return dbx, g2 +} + +func (f *Test) DirectDB() *sql.DB { + return f.db +} + +func (f *Test) New(ctx context.Context) *gorm.DB { + if f.wasDisconnected { + // Connection was killed in order to reset DB + f.db, f.g2 = connectFactory(f.config) + f.wasDisconnected = false + } + + conn := f.g2.Session(&gorm.Session{ + Context: ctx, + Logger: f.g2.Logger.LogMode(logger.Silent), + }) + if f.config.Debug { + conn = conn.Debug() + } + return conn +} + +// CheckConnection checks to ensure a connection is present +func (f *Test) CheckConnection() error { + _, err := f.db.Exec("SELECT 1") + return err +} + +func (f *Test) Close() error { + return f.db.Close() +} + +func (f *Test) ResetDB() { + resetDB(f.config) + f.wasDisconnected = true +} + +func (f *Test) NewListener(ctx context.Context, channel string, callback func(id string)) { + newListener(ctx, f.config.ConnectionString(true), channel, callback) +} diff --git a/pkg/db/db_session/testcontainer.go b/pkg/db/db_session/testcontainer.go new file mode 100755 index 0000000..76e1a26 --- /dev/null +++ b/pkg/db/db_session/testcontainer.go @@ -0,0 +1,182 @@ +package db_session + +import ( + "context" + "database/sql" + "fmt" + "time" + + "github.com/golang/glog" + "github.com/testcontainers/testcontainers-go" + "github.com/testcontainers/testcontainers-go/modules/postgres" + "github.com/testcontainers/testcontainers-go/wait" + + gormpostgres "gorm.io/driver/postgres" + "gorm.io/gorm" + "gorm.io/gorm/logger" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" +) + +type Testcontainer struct { + config *config.DatabaseConfig + container *postgres.PostgresContainer + g2 *gorm.DB + sqlDB *sql.DB +} + +var _ db.SessionFactory = &Testcontainer{} + +// NewTestcontainerFactory creates a SessionFactory using testcontainers. +// This starts a real PostgreSQL container for integration testing. +func NewTestcontainerFactory(config *config.DatabaseConfig) *Testcontainer { + conn := &Testcontainer{ + config: config, + } + conn.Init(config) + return conn +} + +func (f *Testcontainer) Init(config *config.DatabaseConfig) { + ctx := context.Background() + + glog.Infof("Starting PostgreSQL testcontainer...") + + // Create PostgreSQL container + container, err := postgres.Run(ctx, + "postgres:14.2", + postgres.WithDatabase(config.Name), + postgres.WithUsername(config.Username), + postgres.WithPassword(config.Password), + testcontainers.WithWaitStrategy( + wait.ForLog("database system is ready to accept connections"). + WithOccurrence(2). + WithStartupTimeout(60*time.Second)), + ) + if err != nil { + glog.Fatalf("Failed to start PostgreSQL testcontainer: %s", err) + } + + f.container = container + + // Get connection string from container + connStr, err := container.ConnectionString(ctx, "sslmode=disable") + if err != nil { + glog.Fatalf("Failed to get connection string from testcontainer: %s", err) + } + + glog.Infof("PostgreSQL testcontainer started at: %s", connStr) + + // Open SQL connection + f.sqlDB, err = sql.Open("postgres", connStr) + if err != nil { + glog.Fatalf("Failed to connect to testcontainer database: %s", err) + } + + // Configure connection pool + f.sqlDB.SetMaxOpenConns(config.MaxOpenConnections) + + // Connect GORM to use the same connection + conf := &gorm.Config{ + PrepareStmt: false, + FullSaveAssociations: false, + SkipDefaultTransaction: true, + Logger: logger.Default.LogMode(logger.Silent), + } + + if config.Debug { + conf.Logger = logger.Default.LogMode(logger.Info) + } + + f.g2, err = gorm.Open(gormpostgres.New(gormpostgres.Config{ + Conn: f.sqlDB, + PreferSimpleProtocol: true, + }), conf) + if err != nil { + glog.Fatalf("Failed to connect GORM to testcontainer database: %s", err) + } + + // Run migrations + glog.Infof("Running database migrations on testcontainer...") + if err := db.Migrate(f.g2); err != nil { + glog.Fatalf("Failed to run migrations on testcontainer: %s", err) + } + + glog.Infof("Testcontainer database initialized successfully") +} + +func (f *Testcontainer) DirectDB() *sql.DB { + return f.sqlDB +} + +func (f *Testcontainer) New(ctx context.Context) *gorm.DB { + conn := f.g2.Session(&gorm.Session{ + Context: ctx, + Logger: f.g2.Logger.LogMode(logger.Silent), + }) + if f.config.Debug { + conn = conn.Debug() + } + return conn +} + +func (f *Testcontainer) CheckConnection() error { + _, err := f.sqlDB.Exec("SELECT 1") + return err +} + +func (f *Testcontainer) Close() error { + ctx := context.Background() + + // Close SQL connection + if f.sqlDB != nil { + if err := f.sqlDB.Close(); err != nil { + glog.Errorf("Error closing SQL connection: %s", err) + } + } + + // Terminate container + if f.container != nil { + glog.Infof("Stopping PostgreSQL testcontainer...") + if err := f.container.Terminate(ctx); err != nil { + return fmt.Errorf("failed to terminate testcontainer: %s", err) + } + glog.Infof("PostgreSQL testcontainer stopped") + } + + return nil +} + +func (f *Testcontainer) ResetDB() { + // For testcontainers, we can just truncate all tables + ctx := context.Background() + g2 := f.New(ctx) + + // Truncate all business tables in the correct order (respecting FK constraints) + // Using CASCADE to handle foreign key constraints automatically + tables := []string{ + "adapter_statuses", // Polymorphic table, no FK constraints + "node_pools", // Has FK to clusters + "clusters", // Referenced by node_pools + "events", // Independent table + } + for _, table := range tables { + if g2.Migrator().HasTable(table) { + if err := g2.Exec(fmt.Sprintf("TRUNCATE TABLE %s CASCADE", table)).Error; err != nil { + glog.Errorf("Error truncating table %s: %s", table, err) + } + } + } +} + +func (f *Testcontainer) NewListener(ctx context.Context, channel string, callback func(id string)) { + // Get the connection string for the listener + connStr, err := f.container.ConnectionString(ctx, "sslmode=disable") + if err != nil { + glog.Errorf("Failed to get connection string for listener: %s", err) + return + } + + newListener(ctx, connStr, channel, callback) +} diff --git a/pkg/db/migrations.go b/pkg/db/migrations.go new file mode 100755 index 0000000..e65e7af --- /dev/null +++ b/pkg/db/migrations.go @@ -0,0 +1,39 @@ +package db + +import ( + "context" + + "github.com/go-gormigrate/gormigrate/v2" + "github.com/golang/glog" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/migrations" + + "gorm.io/gorm" +) + +// gormigrate is a wrapper for gorm's migration functions that adds schema versioning and rollback capabilities. +// For help writing migration steps, see the gorm documentation on migrations: http://doc.gorm.io/database.html#migration + +func Migrate(g2 *gorm.DB) error { + m := newGormigrate(g2) + + if err := m.Migrate(); err != nil { + return err + } + return nil +} + +// MigrateTo a specific migration will not seed the database, seeds are up to date with the latest +// schema based on the most recent migration +// This should be for testing purposes mainly +func MigrateTo(sessionFactory SessionFactory, migrationID string) { + g2 := sessionFactory.New(context.Background()) + m := newGormigrate(g2) + + if err := m.MigrateTo(migrationID); err != nil { + glog.Fatalf("Could not migrate: %v", err) + } +} + +func newGormigrate(g2 *gorm.DB) *gormigrate.Gormigrate { + return gormigrate.New(g2, gormigrate.DefaultOptions, migrations.MigrationList) +} diff --git a/pkg/db/migrations/202511111044_add_clusters.go b/pkg/db/migrations/202511111044_add_clusters.go new file mode 100644 index 0000000..f093ccc --- /dev/null +++ b/pkg/db/migrations/202511111044_add_clusters.go @@ -0,0 +1,90 @@ +package migrations + +// Migrations should NEVER use types from other packages. Types can change +// and then migrations run on a _new_ database will fail or behave unexpectedly. +// Instead of importing types, always re-create the type in the migration, as +// is done here, even though the same type is defined in pkg/api + +import ( + "gorm.io/gorm" + + "github.com/go-gormigrate/gormigrate/v2" +) + +func addClusters() *gormigrate.Migration { + return &gormigrate.Migration{ + ID: "202511111044", + Migrate: func(tx *gorm.DB) error { + // Create clusters table + // ClusterStatus is stored as JSONB in status_adapters, and status fields + // are flattened for efficient querying + createTableSQL := ` + CREATE TABLE IF NOT EXISTS clusters ( + id VARCHAR(255) PRIMARY KEY, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ NULL, + + -- Core fields + kind VARCHAR(255) NOT NULL DEFAULT 'Cluster', + name VARCHAR(63) NOT NULL, + spec JSONB NOT NULL, + labels JSONB NULL, + href VARCHAR(500), + + -- Version control + generation INTEGER NOT NULL DEFAULT 1, + + -- Status fields (flattened for efficient querying) + status_phase VARCHAR(50) NOT NULL DEFAULT 'NotReady', + status_last_transition_time TIMESTAMPTZ NULL, + status_observed_generation INTEGER NOT NULL DEFAULT 0, + status_updated_at TIMESTAMPTZ NULL, + status_adapters JSONB NULL, + + -- Audit fields + created_by VARCHAR(255) NOT NULL, + updated_by VARCHAR(255) NOT NULL + ); + ` + + if err := tx.Exec(createTableSQL).Error; err != nil { + return err + } + + // Create index on deleted_at for soft deletes + if err := tx.Exec("CREATE INDEX IF NOT EXISTS idx_clusters_deleted_at ON clusters(deleted_at);").Error; err != nil { + return err + } + + // Create unique index on name (only for non-deleted records) + if err := tx.Exec("CREATE UNIQUE INDEX IF NOT EXISTS idx_clusters_name ON clusters(name) WHERE deleted_at IS NULL;").Error; err != nil { + return err + } + + // Create index on status_phase for filtering + if err := tx.Exec("CREATE INDEX IF NOT EXISTS idx_clusters_status_phase ON clusters(status_phase);").Error; err != nil { + return err + } + + return nil + }, + Rollback: func(tx *gorm.DB) error { + // Drop indexes first + if err := tx.Exec("DROP INDEX IF EXISTS idx_clusters_status_phase;").Error; err != nil { + return err + } + if err := tx.Exec("DROP INDEX IF EXISTS idx_clusters_name;").Error; err != nil { + return err + } + if err := tx.Exec("DROP INDEX IF EXISTS idx_clusters_deleted_at;").Error; err != nil { + return err + } + // Drop table + if err := tx.Exec("DROP TABLE IF EXISTS clusters;").Error; err != nil { + return err + } + return nil + }, + } +} diff --git a/pkg/db/migrations/202511111055_add_node_pools.go b/pkg/db/migrations/202511111055_add_node_pools.go new file mode 100644 index 0000000..b06b096 --- /dev/null +++ b/pkg/db/migrations/202511111055_add_node_pools.go @@ -0,0 +1,111 @@ +package migrations + +// Migrations should NEVER use types from other packages. Types can change +// and then migrations run on a _new_ database will fail or behave unexpectedly. +// Instead of importing types, always re-create the type in the migration, as +// is done here, even though the same type is defined in pkg/api + +import ( + "gorm.io/gorm" + + "github.com/go-gormigrate/gormigrate/v2" +) + +func addNodePools() *gormigrate.Migration { + return &gormigrate.Migration{ + ID: "202511111055", + Migrate: func(tx *gorm.DB) error { + // Create node_pools table + createTableSQL := ` + CREATE TABLE IF NOT EXISTS node_pools ( + id VARCHAR(255) PRIMARY KEY, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ NULL, + + -- Core fields + kind VARCHAR(255) NOT NULL DEFAULT 'NodePool', + name VARCHAR(255) NOT NULL, + spec JSONB NOT NULL, + labels JSONB NULL, + href VARCHAR(500), + + -- Owner References (flattened) + owner_id VARCHAR(255) NOT NULL, + owner_kind VARCHAR(50) NOT NULL, + owner_href VARCHAR(500) NULL, + + -- Version control + generation INTEGER NOT NULL DEFAULT 1, + + -- Status fields (flattened for efficient querying) + status_phase VARCHAR(50) NOT NULL DEFAULT 'NotReady', + status_last_transition_time TIMESTAMPTZ NULL, + status_observed_generation INTEGER NOT NULL DEFAULT 0, + status_updated_at TIMESTAMPTZ NULL, + status_adapters JSONB NULL, + + -- Audit fields + created_by VARCHAR(255) NOT NULL, + updated_by VARCHAR(255) NOT NULL + ); + ` + + if err := tx.Exec(createTableSQL).Error; err != nil { + return err + } + + // Create index on deleted_at for soft deletes + if err := tx.Exec("CREATE INDEX IF NOT EXISTS idx_node_pools_deleted_at ON node_pools(deleted_at);").Error; err != nil { + return err + } + + // Create index on owner_id for foreign key lookups + if err := tx.Exec("CREATE INDEX IF NOT EXISTS idx_node_pools_owner_id ON node_pools(owner_id);").Error; err != nil { + return err + } + + // Create index on status_phase for filtering + if err := tx.Exec("CREATE INDEX IF NOT EXISTS idx_node_pools_status_phase ON node_pools(status_phase);").Error; err != nil { + return err + } + + // Add foreign key constraint to clusters + addFKSQL := ` + ALTER TABLE node_pools + ADD CONSTRAINT fk_node_pools_clusters + FOREIGN KEY (owner_id) REFERENCES clusters(id) + ON DELETE RESTRICT ON UPDATE RESTRICT; + ` + if err := tx.Exec(addFKSQL).Error; err != nil { + return err + } + + return nil + }, + Rollback: func(tx *gorm.DB) error { + // Drop foreign key constraint first + if err := tx.Exec("ALTER TABLE node_pools DROP CONSTRAINT IF EXISTS fk_node_pools_clusters;").Error; err != nil { + return err + } + + // Drop indexes + if err := tx.Exec("DROP INDEX IF EXISTS idx_node_pools_status_phase;").Error; err != nil { + return err + } + if err := tx.Exec("DROP INDEX IF EXISTS idx_node_pools_owner_id;").Error; err != nil { + return err + } + if err := tx.Exec("DROP INDEX IF EXISTS idx_node_pools_deleted_at;").Error; err != nil { + return err + } + + // Drop table + if err := tx.Exec("DROP TABLE IF EXISTS node_pools;").Error; err != nil { + return err + } + + return nil + }, + } +} diff --git a/pkg/db/migrations/202511111105_add_adapter_status.go b/pkg/db/migrations/202511111105_add_adapter_status.go new file mode 100644 index 0000000..9bb21b9 --- /dev/null +++ b/pkg/db/migrations/202511111105_add_adapter_status.go @@ -0,0 +1,84 @@ +package migrations + +// Migrations should NEVER use types from other packages. Types can change +// and then migrations run on a _new_ database will fail or behave unexpectedly. +// Instead of importing types, always re-create the type in the migration, as +// is done here, even though the same type is defined in pkg/api + +import ( + "gorm.io/gorm" + + "github.com/go-gormigrate/gormigrate/v2" +) + +func addAdapterStatus() *gormigrate.Migration { + return &gormigrate.Migration{ + ID: "202511111105", + Migrate: func(tx *gorm.DB) error { + // Create adapter_statuses table + // This table uses polymorphic association to link to either clusters or node_pools + createTableSQL := ` + CREATE TABLE IF NOT EXISTS adapter_statuses ( + id VARCHAR(255) PRIMARY KEY, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + deleted_at TIMESTAMPTZ NULL, + + -- Polymorphic association + resource_type VARCHAR(20) NOT NULL, + resource_id VARCHAR(255) NOT NULL, + + -- Adapter information + adapter VARCHAR(255) NOT NULL, + observed_generation INTEGER NOT NULL, + + -- Stored as JSONB + conditions JSONB NOT NULL, + data JSONB NULL, + metadata JSONB NULL + ); + ` + + if err := tx.Exec(createTableSQL).Error; err != nil { + return err + } + + // Create index on deleted_at for soft deletes + if err := tx.Exec("CREATE INDEX IF NOT EXISTS idx_adapter_statuses_deleted_at ON adapter_statuses(deleted_at);").Error; err != nil { + return err + } + + // Create composite index on resource_type and resource_id for lookups + if err := tx.Exec("CREATE INDEX IF NOT EXISTS idx_adapter_statuses_resource ON adapter_statuses(resource_type, resource_id);").Error; err != nil { + return err + } + + // Create unique index on resource_type, resource_id, and adapter + // This ensures one adapter status per resource per adapter + if err := tx.Exec("CREATE UNIQUE INDEX IF NOT EXISTS idx_adapter_statuses_unique ON adapter_statuses(resource_type, resource_id, adapter) WHERE deleted_at IS NULL;").Error; err != nil { + return err + } + + return nil + }, + Rollback: func(tx *gorm.DB) error { + // Drop indexes + if err := tx.Exec("DROP INDEX IF EXISTS idx_adapter_statuses_unique;").Error; err != nil { + return err + } + if err := tx.Exec("DROP INDEX IF EXISTS idx_adapter_statuses_resource;").Error; err != nil { + return err + } + if err := tx.Exec("DROP INDEX IF EXISTS idx_adapter_statuses_deleted_at;").Error; err != nil { + return err + } + + // Drop table + if err := tx.Exec("DROP TABLE IF EXISTS adapter_statuses;").Error; err != nil { + return err + } + + return nil + }, + } +} diff --git a/pkg/db/migrations/migration_structs.go b/pkg/db/migrations/migration_structs.go new file mode 100755 index 0000000..c422587 --- /dev/null +++ b/pkg/db/migrations/migration_structs.go @@ -0,0 +1,63 @@ +package migrations + +import ( + "fmt" + "time" + + "gorm.io/gorm" + + "github.com/go-gormigrate/gormigrate/v2" +) + +// gormigrate is a wrapper for gorm's migration functions that adds schema versioning and rollback capabilities. +// For help writing migration steps, see the gorm documentation on migrations: http://doc.gorm.io/database.html#migration + +// MigrationList rules: +// +// 1. IDs are numerical timestamps that must sort ascending. +// Use YYYYMMDDHHMM w/ 24 hour time for format +// Example: August 21 2018 at 2:54pm would be 201808211454. +// +// 2. Include models inline with migrations to see the evolution of the object over time. +// Using our internal type models directly in the first migration would fail in future clean installs. +// +// 3. Migrations must be backwards compatible. There are no new required fields allowed. +// See $project_home/g2/README.md +// +// 4. Create one function in a separate file that returns your Migration. Add that single function call to this list. +var MigrationList = []*gormigrate.Migration{ + // addEvents(), // REMOVED: Events table no longer used - no event-driven components + addClusters(), + addNodePools(), + addAdapterStatus(), +} + +// Model represents the base model struct. All entities will have this struct embedded. +type Model struct { + ID string `gorm:"primary_key"` + CreatedAt time.Time + UpdatedAt time.Time + DeletedAt gorm.DeletedAt `gorm:"index"` +} + +type fkMigration struct { + Model string + Dest string + Field string + Reference string +} + +func CreateFK(g2 *gorm.DB, fks ...fkMigration) error { + var query = `ALTER TABLE %s ADD CONSTRAINT %s FOREIGN KEY (%s) REFERENCES %s ON DELETE RESTRICT ON UPDATE RESTRICT;` + var drop = `ALTER TABLE %s DROP CONSTRAINT IF EXISTS %s;` + + for _, fk := range fks { + name := fmt.Sprintf("fk_%s_%s", fk.Model, fk.Dest) + + g2.Exec(fmt.Sprintf(drop, fk.Model, name)) + if err := g2.Exec(fmt.Sprintf(query, fk.Model, name, fk.Field, fk.Reference)).Error; err != nil { + return err + } + } + return nil +} diff --git a/pkg/db/mocks/session_factory.go b/pkg/db/mocks/session_factory.go new file mode 100755 index 0000000..18f8396 --- /dev/null +++ b/pkg/db/mocks/session_factory.go @@ -0,0 +1,79 @@ +package mocks + +import ( + "context" + "database/sql" + + "github.com/DATA-DOG/go-sqlmock" + "gorm.io/driver/postgres" + "gorm.io/gorm" + "gorm.io/gorm/logger" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" +) + +var _ db.SessionFactory = &MockSessionFactory{} + +type MockSessionFactory struct { + gormDB *gorm.DB + sqlDB *sql.DB + mock sqlmock.Sqlmock +} + +// NewMockSessionFactory creates a SessionFactory using go-sqlmock. +// This provides a mock database without requiring PostgreSQL or SQLite. +func NewMockSessionFactory() *MockSessionFactory { + // Create mock SQL database + sqlDB, mock, err := sqlmock.New() + if err != nil { + panic("failed to create sqlmock: " + err.Error()) + } + + // Open GORM with the mock database using postgres dialector + gormDB, err := gorm.Open(postgres.New(postgres.Config{ + Conn: sqlDB, + }), &gorm.Config{ + Logger: logger.Default.LogMode(logger.Silent), + }) + if err != nil { + panic("failed to create gorm DB with sqlmock: " + err.Error()) + } + + return &MockSessionFactory{ + gormDB: gormDB, + sqlDB: sqlDB, + mock: mock, + } +} + +func (m *MockSessionFactory) Init(config *config.DatabaseConfig) { + // Mock implementation - does nothing +} + +func (m *MockSessionFactory) DirectDB() *sql.DB { + return m.sqlDB +} + +func (m *MockSessionFactory) New(ctx context.Context) *gorm.DB { + return m.gormDB.WithContext(ctx) +} + +func (m *MockSessionFactory) CheckConnection() error { + return nil +} + +func (m *MockSessionFactory) Close() error { + if m.sqlDB != nil { + return m.sqlDB.Close() + } + return nil +} + +func (m *MockSessionFactory) ResetDB() { + // Mock implementation - does nothing +} + +func (m *MockSessionFactory) NewListener(ctx context.Context, channel string, callback func(id string)) { + // Mock implementation - does nothing +} diff --git a/pkg/db/session.go b/pkg/db/session.go new file mode 100755 index 0000000..a124751 --- /dev/null +++ b/pkg/db/session.go @@ -0,0 +1,20 @@ +package db + +import ( + "context" + "database/sql" + + "gorm.io/gorm" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" +) + +type SessionFactory interface { + Init(*config.DatabaseConfig) + DirectDB() *sql.DB + New(ctx context.Context) *gorm.DB + CheckConnection() error + Close() error + ResetDB() + NewListener(ctx context.Context, channel string, callback func(id string)) +} diff --git a/pkg/db/sql_helpers.go b/pkg/db/sql_helpers.go new file mode 100755 index 0000000..c106ca6 --- /dev/null +++ b/pkg/db/sql_helpers.go @@ -0,0 +1,249 @@ +package db + +import ( + "fmt" + "reflect" + "strings" + + "github.com/jinzhu/inflection" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/yaacov/tree-search-language/pkg/tsl" + "gorm.io/gorm" +) + +// Check if a field name starts with properties. +func startsWithProperties(s string) bool { + return strings.HasPrefix(s, "properties.") +} + +// hasProperty return true if node has a property identifier on left hand side. +func hasProperty(n tsl.Node) bool { + // Get the left side operator. + l, ok := n.Left.(tsl.Node) + if !ok { + return false + } + + // If left side hand is not a `properties` identifier, return. + if l.Func != tsl.IdentOp || !startsWithProperties(l.Left.(string)) { + return false + } + + return true +} + +// getField gets the sql field associated with a name. +func getField(name string, disallowedFields map[string]string) (field string, err *errors.ServiceError) { + // We want to accept names with trailing and leading spaces + trimmedName := strings.Trim(name, " ") + + // Check for properties ->> '' + if strings.HasPrefix(trimmedName, "properties ->>") { + field = trimmedName + return + } + + // Check for nested field, e.g., subscription_labels.key + checkName := trimmedName + fieldParts := strings.Split(trimmedName, ".") + if len(fieldParts) > 2 { + err = errors.BadRequest("%s is not a valid field name", name) + return + } + if len(fieldParts) > 1 { + checkName = fieldParts[1] + } + + // Check for allowed fields + _, ok := disallowedFields[checkName] + if ok { + err = errors.BadRequest("%s is not a valid field name", name) + return + } + field = trimmedName + return +} + +// propertiesNodeConverter converts a node with a properties identifier +// to a properties node. +// +// For example, it will convert: +// ( properties. = ) to +// ( properties ->> = ) +func propertiesNodeConverter(n tsl.Node) tsl.Node { + + // Get the left side operator. + l, ok := n.Left.(tsl.Node) + if !ok { + return n + } + + // Get the property name. + propetyName := l.Left.(string)[11:] + + // Build a new node that converts: + // ( properties. = ) to + // ( properties ->> = ) + propertyNode := tsl.Node{ + Func: n.Func, + Left: tsl.Node{ + Func: tsl.IdentOp, + Left: fmt.Sprintf("properties ->> '%s'", propetyName), + }, + Right: n.Right, + } + + return propertyNode +} + +// FieldNameWalk walks on the filter tree and check/replace +// the search fields names: +// a. the the field name is valid. +// b. replace the field name with the SQL column name. +func FieldNameWalk( + n tsl.Node, + disallowedFields map[string]string) (newNode tsl.Node, err *errors.ServiceError) { + + var field string + var l, r tsl.Node + + // Check for properties. = nodes, and convert them to + // ( properties ->> = ) + // nodes. + if hasProperty(n) { + n = propertiesNodeConverter(n) + } + + switch n.Func { + case tsl.IdentOp: + // If this is an Identifier, check field name is a string. + userFieldName, ok := n.Left.(string) + if !ok { + err = errors.BadRequest("Identifier name must be a string") + return + } + + // Check field name in the disallowedFields field names. + field, err = getField(userFieldName, disallowedFields) + if err != nil { + return + } + + // Replace identifier name. + newNode = tsl.Node{Func: tsl.IdentOp, Left: field} + case tsl.StringOp, tsl.NumberOp: + // This are leafs, just return. + newNode = tsl.Node{Func: n.Func, Left: n.Left} + default: + // o/w continue walking the tree. + if n.Left != nil { + l, err = FieldNameWalk(n.Left.(tsl.Node), disallowedFields) + if err != nil { + return + } + } + + // Add right child(ren) if exist. + if n.Right != nil { + switch v := n.Right.(type) { + case tsl.Node: + // It's a regular node, just add it. + r, err = FieldNameWalk(v, disallowedFields) + if err != nil { + return + } + + newNode = tsl.Node{Func: n.Func, Left: l, Right: r} + case []tsl.Node: + // It's a list of nodes, some TSL operators have multiple RHS arguments + // for example `IN` and `BETWEEN`. If this operator has a list of arguments, + // loop over the list, and add all nodes. + var rr []tsl.Node + + // Add all nodes in the right side array. + for _, e := range v { + r, err = FieldNameWalk(e, disallowedFields) + if err != nil { + return + } + + rr = append(rr, r) + } + + newNode = tsl.Node{Func: n.Func, Left: l, Right: rr} + default: + // We only support `Node` and `[]Node` types for the right hand side, + // of TSL operators. If here than this is an unsupported right hand side + // type. + err = errors.BadRequest("unsupported right hand side type in search query") + } + } else { + // If here than `n.Right` is nil. This is a legit type of node, + // we just need to ignore the right hand side, and continue walking the + // tree. + newNode = tsl.Node{Func: n.Func, Left: l} + } + } + + return +} + +// cleanOrderBy takes the orderBy arg and cleans it. +func cleanOrderBy(userArg string, disallowedFields map[string]string) (orderBy string, err *errors.ServiceError) { + var orderField string + + // We want to accept user params with trailing and leading spaces + trimedName := strings.Trim(userArg, " ") + + // Each OrderBy can be a "" or a " asc|desc" + order := strings.Split(trimedName, " ") + direction := "none valid" + + if len(order) == 1 { + orderField, err = getField(order[0], disallowedFields) + direction = "asc" + } else if len(order) == 2 { + orderField, err = getField(order[0], disallowedFields) + direction = order[1] + } + if err != nil || (direction != "asc" && direction != "desc") { + err = errors.BadRequest("bad order value '%s'", userArg) + return + } + + orderBy = fmt.Sprintf("%s %s", orderField, direction) + return +} + +// ArgsToOrderBy returns cleaned orderBy list. +func ArgsToOrderBy( + orderByArgs []string, + disallowedFields map[string]string) (orderBy []string, err *errors.ServiceError) { + + var order string + if len(orderByArgs) != 0 { + orderBy = []string{} + for _, o := range orderByArgs { + order, err = cleanOrderBy(o, disallowedFields) + if err != nil { + return + } + + // If valid add the user entered order by, to the order by list + orderBy = append(orderBy, order) + } + } + return +} + +func GetTableName(g2 *gorm.DB) string { + if g2.Statement.Parse(g2.Statement.Model) != nil { + return "xxx" + } + if g2.Statement.Schema != nil { + return g2.Statement.Schema.Table + } else { + name := reflect.TypeOf(g2.Statement.Model).Elem().Name() + return inflection.Plural(strings.ToLower(name)) + } +} diff --git a/pkg/db/transaction/transaction.go b/pkg/db/transaction/transaction.go new file mode 100755 index 0000000..246fda0 --- /dev/null +++ b/pkg/db/transaction/transaction.go @@ -0,0 +1,67 @@ +package transaction + +import ( + "database/sql" + "errors" +) + +// By default do no roll back transaction. +// only perform rollback if explicitly set by g2.g2.MarkForRollback(ctx, err) +const defaultRollbackPolicy = false + +// Transaction represents an sql transaction +type Transaction struct { + rollbackFlag bool + tx *sql.Tx + txid int64 +} + +// Build Creates a new transaction object +func Build(tx *sql.Tx, id int64, rollbackFlag bool) *Transaction { + return &Transaction{ + tx: tx, + txid: id, + rollbackFlag: defaultRollbackPolicy, + } +} + +// MarkedForRollback returns true if a transaction is flagged for rollback and false otherwise. +func (tx *Transaction) MarkedForRollback() bool { + return tx.rollbackFlag +} + +func (tx *Transaction) Tx() *sql.Tx { + return tx.tx +} + +func (tx *Transaction) TxID() int64 { + return tx.txid +} + +func (tx *Transaction) Commit() error { + // tx must exits + if tx.tx == nil { + return errors.New("db: transaction hasn't been started yet") + } + + // must call commit on 'g2' which is Gorm + // do *not* call commit on the underlying transaction itself. Gorm does that. + err := tx.tx.Commit() + tx.tx = nil + return err +} + +// Rollback ends the transaction by rolling back +func (tx *Transaction) Rollback() error { + // tx must exist + if tx.tx == nil { + return errors.New("db: transaction hasn't been started yet") + } + err := tx.tx.Rollback() + tx.tx = nil + return err +} + +func (tx *Transaction) SetRollbackFlag(flag bool) { + tx.rollbackFlag = flag +} diff --git a/pkg/db/transaction_middleware.go b/pkg/db/transaction_middleware.go new file mode 100755 index 0000000..37936d9 --- /dev/null +++ b/pkg/db/transaction_middleware.go @@ -0,0 +1,47 @@ +package db + +import ( + "encoding/json" + "net/http" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" +) + +// TransactionMiddleware creates a new HTTP middleware that begins a database transaction +// and stores it in the request context. +func TransactionMiddleware(next http.Handler, connection SessionFactory) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + // Create a new Context with the transaction stored in it. + ctx, err := NewContext(r.Context(), connection) + log := logger.NewOCMLogger(ctx) + if err != nil { + log.Extra("error", err.Error()).Error("Could not create transaction") + // use default error to avoid exposing internals to users + err := errors.GeneralError("") + operationID := logger.GetOperationID(ctx) + writeJSONResponse(w, err.HttpCode, err.AsOpenapiError(operationID)) + return + } + + // Set the value of the request pointer to the value of a new copy of the request with the new context key,vale + // stored in it + *r = *r.WithContext(ctx) + + // Returned from handlers and resolve transactions. + defer func() { Resolve(r.Context()) }() + + // Continue handling requests. + next.ServeHTTP(w, r) + }) +} + +func writeJSONResponse(w http.ResponseWriter, code int, payload interface{}) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(code) + + if payload != nil { + response, _ := json.Marshal(payload) + _, _ = w.Write(response) + } +} diff --git a/pkg/db/transactions.go b/pkg/db/transactions.go new file mode 100755 index 0000000..3a181d0 --- /dev/null +++ b/pkg/db/transactions.go @@ -0,0 +1,38 @@ +package db + +import ( + "context" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/transaction" +) + +// By default do no roll back transaction. +// only perform rollback if explicitly set by g2.g2.MarkForRollback(ctx, err) +const defaultRollbackPolicy = false + +// newTransaction constructs a new Transaction object. +func newTransaction(ctx context.Context, connection SessionFactory) (*transaction.Transaction, error) { + if connection == nil { + // This happens in non-integration tests + return nil, nil + } + + dbx := connection.DirectDB() + tx, err := dbx.Begin() + if err != nil { + return nil, err + } + + // current transaction ID set by postgres. these are *not* distinct across time + // and do get reset after postgres performs "vacuuming" to reclaim used IDs. + var txid int64 + row := tx.QueryRow("select txid_current()") + if row != nil { + err := row.Scan(&txid) + if err != nil { + return nil, err + } + } + + return transaction.Build(tx, txid, defaultRollbackPolicy), nil +} diff --git a/pkg/errors/errors.go b/pkg/errors/errors.go new file mode 100755 index 0000000..fc4da7c --- /dev/null +++ b/pkg/errors/errors.go @@ -0,0 +1,207 @@ +package errors + +import ( + "fmt" + "net/http" + "strconv" + + "github.com/golang/glog" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" +) + +const ( + // Prefix used for error code strings + // Example: + // ErrorCodePrefix = "rh-text" + // results in: hyperfleet-1 + ErrorCodePrefix = "hyperfleet" + + // HREF for API errors + ErrorHref = "/api/hyperfleet/v1/errors/" + + // InvalidToken occurs when a token is invalid (generally, not found in the database) + ErrorInvalidToken ServiceErrorCode = 1 + + // Forbidden occurs when a user has been blacklisted + ErrorForbidden ServiceErrorCode = 4 + + // Conflict occurs when a database constraint is violated + ErrorConflict ServiceErrorCode = 6 + + // NotFound occurs when a record is not found in the database + ErrorNotFound ServiceErrorCode = 7 + + // Validation occurs when an object fails validation + ErrorValidation ServiceErrorCode = 8 + + // General occurs when an error fails to match any other error code + ErrorGeneral ServiceErrorCode = 9 + + // NotImplemented occurs when an API REST method is not implemented in a handler + ErrorNotImplemented ServiceErrorCode = 10 + + // Unauthorized occurs when the requester is not authorized to perform the specified action + ErrorUnauthorized ServiceErrorCode = 11 + + // Unauthenticated occurs when the provided credentials cannot be validated + ErrorUnauthenticated ServiceErrorCode = 15 + + // MalformedRequest occurs when the request body cannot be read + ErrorMalformedRequest ServiceErrorCode = 17 + + // Bad Request + ErrorBadRequest ServiceErrorCode = 21 + + // Invalid Search Query + ErrorFailedToParseSearch ServiceErrorCode = 23 + + // DatabaseAdvisoryLock occurs whe the advisory lock is failed to get + ErrorDatabaseAdvisoryLock ServiceErrorCode = 26 +) + +type ServiceErrorCode int + +type ServiceErrors []ServiceError + +func Find(code ServiceErrorCode) (bool, *ServiceError) { + for _, err := range Errors() { + if err.Code == code { + return true, &err + } + } + return false, nil +} + +func Errors() ServiceErrors { + return ServiceErrors{ + ServiceError{ErrorInvalidToken, "Invalid token provided", http.StatusForbidden}, + ServiceError{ErrorForbidden, "Forbidden to perform this action", http.StatusForbidden}, + ServiceError{ErrorConflict, "An entity with the specified unique values already exists", http.StatusConflict}, + ServiceError{ErrorNotFound, "Resource not found", http.StatusNotFound}, + ServiceError{ErrorValidation, "General validation failure", http.StatusBadRequest}, + ServiceError{ErrorGeneral, "Unspecified error", http.StatusInternalServerError}, + ServiceError{ErrorNotImplemented, "HTTP Method not implemented for this endpoint", http.StatusMethodNotAllowed}, + ServiceError{ErrorUnauthorized, "Account is unauthorized to perform this action", http.StatusForbidden}, + ServiceError{ErrorUnauthenticated, "Account authentication could not be verified", http.StatusUnauthorized}, + ServiceError{ErrorMalformedRequest, "Unable to read request body", http.StatusBadRequest}, + ServiceError{ErrorBadRequest, "Bad request", http.StatusBadRequest}, + ServiceError{ErrorFailedToParseSearch, "Failed to parse search query", http.StatusBadRequest}, + ServiceError{ErrorDatabaseAdvisoryLock, "Database advisory lock error", http.StatusInternalServerError}, + } +} + +type ServiceError struct { + // Code is the numeric and distinct ID for the error + Code ServiceErrorCode + // Reason is the context-specific reason the error was generated + Reason string + // HttopCode is the HttpCode associated with the error when the error is returned as an API response + HttpCode int +} + +// New Reason can be a string with format verbs, which will be replace by the specified values +func New(code ServiceErrorCode, reason string, values ...interface{}) *ServiceError { + // If the code isn't defined, use the general error code + var err *ServiceError + exists, err := Find(code) + if !exists { + glog.Errorf("Undefined error code used: %d", code) + err = &ServiceError{ErrorGeneral, "Unspecified error", 500} + } + + // If the reason is unspecified, use the default + if reason != "" { + err.Reason = fmt.Sprintf(reason, values...) + } + + return err +} + +func (e *ServiceError) Error() string { + return fmt.Sprintf("%s: %s", *CodeStr(e.Code), e.Reason) +} + +func (e *ServiceError) AsError() error { + return fmt.Errorf("%s", e.Error()) +} + +func (e *ServiceError) Is404() bool { + return e.Code == NotFound("").Code +} + +func (e *ServiceError) IsConflict() bool { + return e.Code == Conflict("").Code +} + +func (e *ServiceError) IsForbidden() bool { + return e.Code == Forbidden("").Code +} + +func (e *ServiceError) AsOpenapiError(operationID string) openapi.Error { + return openapi.Error{ + Kind: openapi.PtrString("Error"), + Id: openapi.PtrString(strconv.Itoa(int(e.Code))), + Href: Href(e.Code), + Code: CodeStr(e.Code), + Reason: openapi.PtrString(e.Reason), + OperationId: openapi.PtrString(operationID), + } +} + +func CodeStr(code ServiceErrorCode) *string { + return openapi.PtrString(fmt.Sprintf("%s-%d", ErrorCodePrefix, code)) +} + +func Href(code ServiceErrorCode) *string { + return openapi.PtrString(fmt.Sprintf("%s%d", ErrorHref, code)) +} + +func NotFound(reason string, values ...interface{}) *ServiceError { + return New(ErrorNotFound, reason, values...) +} + +func GeneralError(reason string, values ...interface{}) *ServiceError { + return New(ErrorGeneral, reason, values...) +} + +func Unauthorized(reason string, values ...interface{}) *ServiceError { + return New(ErrorUnauthorized, reason, values...) +} + +func Unauthenticated(reason string, values ...interface{}) *ServiceError { + return New(ErrorUnauthenticated, reason, values...) +} + +func Forbidden(reason string, values ...interface{}) *ServiceError { + return New(ErrorForbidden, reason, values...) +} + +func NotImplemented(reason string, values ...interface{}) *ServiceError { + return New(ErrorNotImplemented, reason, values...) +} + +func Conflict(reason string, values ...interface{}) *ServiceError { + return New(ErrorConflict, reason, values...) +} + +func Validation(reason string, values ...interface{}) *ServiceError { + return New(ErrorValidation, reason, values...) +} + +func MalformedRequest(reason string, values ...interface{}) *ServiceError { + return New(ErrorMalformedRequest, reason, values...) +} + +func BadRequest(reason string, values ...interface{}) *ServiceError { + return New(ErrorBadRequest, reason, values...) +} + +func FailedToParseSearch(reason string, values ...interface{}) *ServiceError { + message := fmt.Sprintf("Failed to parse search query: %s", reason) + return New(ErrorFailedToParseSearch, message, values...) +} + +func DatabaseAdvisoryLock(err error) *ServiceError { + return New(ErrorDatabaseAdvisoryLock, err.Error(), []string{}) +} diff --git a/pkg/errors/errors_test.go b/pkg/errors/errors_test.go new file mode 100755 index 0000000..a6fe11d --- /dev/null +++ b/pkg/errors/errors_test.go @@ -0,0 +1,25 @@ +package errors + +import ( + "testing" + + . "github.com/onsi/gomega" +) + +func TestErrorFormatting(t *testing.T) { + RegisterTestingT(t) + err := New(ErrorGeneral, "test %s, %d", "errors", 1) + Expect(err.Reason).To(Equal("test errors, 1")) +} + +func TestErrorFind(t *testing.T) { + RegisterTestingT(t) + exists, err := Find(ErrorNotFound) + Expect(exists).To(Equal(true)) + Expect(err.Code).To(Equal(ErrorNotFound)) + + // Hopefully we never reach 91,823,719 error codes or this test will fail + exists, err = Find(ServiceErrorCode(91823719)) + Expect(exists).To(Equal(false)) + Expect(err).To(BeNil()) +} diff --git a/pkg/handlers/cluster.go b/pkg/handlers/cluster.go new file mode 100644 index 0000000..2b9fcf7 --- /dev/null +++ b/pkg/handlers/cluster.go @@ -0,0 +1,152 @@ +package handlers + +import ( + "encoding/json" + "net/http" + + "github.com/gorilla/mux" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/presenters" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/services" +) + +var _ RestHandler = clusterHandler{} + +type clusterHandler struct { + cluster services.ClusterService + generic services.GenericService +} + +func NewClusterHandler(cluster services.ClusterService, generic services.GenericService) *clusterHandler { + return &clusterHandler{ + cluster: cluster, + generic: generic, + } +} + +func (h clusterHandler) Create(w http.ResponseWriter, r *http.Request) { + var req openapi.ClusterCreateRequest + cfg := &handlerConfig{ + &req, + []validate{ + validateEmpty(&req, "Id", "id"), + }, + func() (interface{}, *errors.ServiceError) { + ctx := r.Context() + // Use the ClusterFromOpenAPICreate helper to convert the request + clusterModel := api.ClusterFromOpenAPICreate(&req, "system") + clusterModel, err := h.cluster.Create(ctx, clusterModel) + if err != nil { + return nil, err + } + return presenters.PresentCluster(clusterModel), nil + }, + handleError, + } + + handle(w, r, cfg, http.StatusCreated) +} + +func (h clusterHandler) Patch(w http.ResponseWriter, r *http.Request) { + var patch api.ClusterPatchRequest + + cfg := &handlerConfig{ + &patch, + []validate{}, + func() (interface{}, *errors.ServiceError) { + ctx := r.Context() + id := mux.Vars(r)["id"] + found, err := h.cluster.Get(ctx, id) + if err != nil { + return nil, err + } + + //patch a field + if patch.Name != nil { + found.Name = *patch.Name + } + if patch.Spec != nil { + specJSON, _ := json.Marshal(*patch.Spec) + found.Spec = specJSON + } + if patch.Generation != nil { + found.Generation = *patch.Generation + } + + clusterModel, err := h.cluster.Replace(ctx, found) + if err != nil { + return nil, err + } + return presenters.PresentCluster(clusterModel), nil + }, + handleError, + } + + handle(w, r, cfg, http.StatusOK) +} + +func (h clusterHandler) List(w http.ResponseWriter, r *http.Request) { + cfg := &handlerConfig{ + Action: func() (interface{}, *errors.ServiceError) { + ctx := r.Context() + + listArgs := services.NewListArguments(r.URL.Query()) + var clusters []api.Cluster + paging, err := h.generic.List(ctx, "username", listArgs, &clusters) + if err != nil { + return nil, err + } + clusterList := openapi.ClusterList{ + Kind: "ClusterList", + Page: int32(paging.Page), + Size: int32(paging.Size), + Total: int32(paging.Total), + Items: []openapi.Cluster{}, + } + + for _, cluster := range clusters { + converted := presenters.PresentCluster(&cluster) + clusterList.Items = append(clusterList.Items, converted) + } + if listArgs.Fields != nil { + filteredItems, err := presenters.SliceFilter(listArgs.Fields, clusterList.Items) + if err != nil { + return nil, err + } + return filteredItems, nil + } + return clusterList, nil + }, + } + + handleList(w, r, cfg) +} + +func (h clusterHandler) Get(w http.ResponseWriter, r *http.Request) { + cfg := &handlerConfig{ + Action: func() (interface{}, *errors.ServiceError) { + id := mux.Vars(r)["id"] + ctx := r.Context() + cluster, err := h.cluster.Get(ctx, id) + if err != nil { + return nil, err + } + + return presenters.PresentCluster(cluster), nil + }, + } + + handleGet(w, r, cfg) +} + +func (h clusterHandler) Delete(w http.ResponseWriter, r *http.Request) { + cfg := &handlerConfig{ + Action: func() (interface{}, *errors.ServiceError) { + return nil, errors.NotImplemented("delete") + }, + } + handleDelete(w, r, cfg, http.StatusNoContent) +} diff --git a/pkg/handlers/cluster_nodepools.go b/pkg/handlers/cluster_nodepools.go new file mode 100644 index 0000000..9aa3a9d --- /dev/null +++ b/pkg/handlers/cluster_nodepools.go @@ -0,0 +1,157 @@ +package handlers + +import ( + "net/http" + + "github.com/gorilla/mux" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/presenters" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/services" +) + +type clusterNodePoolsHandler struct { + clusterService services.ClusterService + nodePoolService services.NodePoolService + generic services.GenericService +} + +func NewClusterNodePoolsHandler(clusterService services.ClusterService, nodePoolService services.NodePoolService, generic services.GenericService) *clusterNodePoolsHandler { + return &clusterNodePoolsHandler{ + clusterService: clusterService, + nodePoolService: nodePoolService, + generic: generic, + } +} + +// List returns all nodepools for a cluster +func (h clusterNodePoolsHandler) List(w http.ResponseWriter, r *http.Request) { + cfg := &handlerConfig{ + Action: func() (interface{}, *errors.ServiceError) { + ctx := r.Context() + clusterID := mux.Vars(r)["id"] + + // Verify cluster exists + _, err := h.clusterService.Get(ctx, clusterID) + if err != nil { + return nil, err + } + + // Get nodepools with owner_id = clusterID + listArgs := services.NewListArguments(r.URL.Query()) + // Add filter for owner_id + if listArgs.Search == "" { + listArgs.Search = "owner_id = '" + clusterID + "'" + } else { + listArgs.Search = listArgs.Search + " AND owner_id = '" + clusterID + "'" + } + + var nodePools []api.NodePool + paging, err := h.generic.List(ctx, "username", listArgs, &nodePools) + if err != nil { + return nil, err + } + + // Build list response + items := make([]openapi.NodePool, 0, len(nodePools)) + for _, nodePool := range nodePools { + converted := presenters.PresentNodePool(&nodePool) + items = append(items, converted) + } + + nodePoolList := struct { + Kind string `json:"kind"` + Page int32 `json:"page"` + Size int32 `json:"size"` + Total int32 `json:"total"` + Items []openapi.NodePool `json:"items"` + }{ + Kind: "NodePoolList", + Page: int32(paging.Page), + Size: int32(paging.Size), + Total: int32(paging.Total), + Items: items, + } + + if listArgs.Fields != nil { + filteredItems, err := presenters.SliceFilter(listArgs.Fields, nodePoolList.Items) + if err != nil { + return nil, err + } + return filteredItems, nil + } + return nodePoolList, nil + }, + } + + handleList(w, r, cfg) +} + +// Get returns a specific nodepool for a cluster +func (h clusterNodePoolsHandler) Get(w http.ResponseWriter, r *http.Request) { + cfg := &handlerConfig{ + Action: func() (interface{}, *errors.ServiceError) { + ctx := r.Context() + clusterID := mux.Vars(r)["id"] + nodePoolID := mux.Vars(r)["nodepool_id"] + + // Verify cluster exists + _, err := h.clusterService.Get(ctx, clusterID) + if err != nil { + return nil, err + } + + // Get nodepool + nodePool, err := h.nodePoolService.Get(ctx, nodePoolID) + if err != nil { + return nil, err + } + + // Verify nodepool belongs to this cluster + if nodePool.OwnerID != clusterID { + return nil, errors.NotFound("NodePool '%s' not found for cluster '%s'", nodePoolID, clusterID) + } + + return presenters.PresentNodePool(nodePool), nil + }, + } + + handleGet(w, r, cfg) +} + +// Create creates a new nodepool for a cluster +func (h clusterNodePoolsHandler) Create(w http.ResponseWriter, r *http.Request) { + var req openapi.NodePoolCreateRequest + cfg := &handlerConfig{ + &req, + []validate{ + validateEmpty(&req, "Id", "id"), + }, + func() (interface{}, *errors.ServiceError) { + ctx := r.Context() + clusterID := mux.Vars(r)["id"] + + // Verify cluster exists + cluster, err := h.clusterService.Get(ctx, clusterID) + if err != nil { + return nil, err + } + + // Use the NodePoolFromOpenAPICreate helper to convert the request + nodePoolModel := api.NodePoolFromOpenAPICreate(&req, cluster.ID, "system") + + // Create nodepool + nodePoolModel, err = h.nodePoolService.Create(ctx, nodePoolModel) + if err != nil { + return nil, err + } + + return presenters.PresentNodePool(nodePoolModel), nil + }, + handleError, + } + + handle(w, r, cfg, http.StatusCreated) +} diff --git a/pkg/handlers/cluster_nodepools_test.go b/pkg/handlers/cluster_nodepools_test.go new file mode 100644 index 0000000..625aa38 --- /dev/null +++ b/pkg/handlers/cluster_nodepools_test.go @@ -0,0 +1,279 @@ +package handlers + +import ( + "context" + "encoding/json" + "net/http" + "net/http/httptest" + "testing" + "time" + + "github.com/gorilla/mux" + . "github.com/onsi/gomega" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/services" +) + +// Mock ClusterService +type mockClusterService struct { + getFunc func(ctx context.Context, id string) (*api.Cluster, *errors.ServiceError) +} + +func (m *mockClusterService) Get(ctx context.Context, id string) (*api.Cluster, *errors.ServiceError) { + if m.getFunc != nil { + return m.getFunc(ctx, id) + } + return nil, nil +} + +func (m *mockClusterService) Create(ctx context.Context, cluster *api.Cluster) (*api.Cluster, *errors.ServiceError) { + return nil, nil +} + +func (m *mockClusterService) Replace(ctx context.Context, cluster *api.Cluster) (*api.Cluster, *errors.ServiceError) { + return nil, nil +} + +func (m *mockClusterService) Delete(ctx context.Context, id string) *errors.ServiceError { + return nil +} + +func (m *mockClusterService) All(ctx context.Context) (api.ClusterList, *errors.ServiceError) { + return nil, nil +} + +func (m *mockClusterService) FindByIDs(ctx context.Context, ids []string) (api.ClusterList, *errors.ServiceError) { + return nil, nil +} + +func (m *mockClusterService) UpdateClusterStatusFromAdapters(ctx context.Context, clusterID string) (*api.Cluster, *errors.ServiceError) { + return nil, nil +} + +func (m *mockClusterService) OnUpsert(ctx context.Context, id string) error { + return nil +} + +func (m *mockClusterService) OnDelete(ctx context.Context, id string) error { + return nil +} + +// Mock NodePoolService +type mockNodePoolService struct { + getFunc func(ctx context.Context, id string) (*api.NodePool, *errors.ServiceError) +} + +func (m *mockNodePoolService) Get(ctx context.Context, id string) (*api.NodePool, *errors.ServiceError) { + if m.getFunc != nil { + return m.getFunc(ctx, id) + } + return nil, nil +} + +func (m *mockNodePoolService) Create(ctx context.Context, nodePool *api.NodePool) (*api.NodePool, *errors.ServiceError) { + return nil, nil +} + +func (m *mockNodePoolService) Replace(ctx context.Context, nodePool *api.NodePool) (*api.NodePool, *errors.ServiceError) { + return nil, nil +} + +func (m *mockNodePoolService) Delete(ctx context.Context, id string) *errors.ServiceError { + return nil +} + +func (m *mockNodePoolService) All(ctx context.Context) (api.NodePoolList, *errors.ServiceError) { + return nil, nil +} + +func (m *mockNodePoolService) FindByIDs(ctx context.Context, ids []string) (api.NodePoolList, *errors.ServiceError) { + return nil, nil +} + +func (m *mockNodePoolService) UpdateNodePoolStatusFromAdapters(ctx context.Context, nodePoolID string) (*api.NodePool, *errors.ServiceError) { + return nil, nil +} + +func (m *mockNodePoolService) OnUpsert(ctx context.Context, id string) error { + return nil +} + +func (m *mockNodePoolService) OnDelete(ctx context.Context, id string) error { + return nil +} + +// Mock GenericService +type mockGenericService struct{} + +func (m *mockGenericService) Get(ctx context.Context, username string, id string, resource interface{}) *errors.ServiceError { + return nil +} + +func (m *mockGenericService) Create(ctx context.Context, username string, resource interface{}) *errors.ServiceError { + return nil +} + +func (m *mockGenericService) List(ctx context.Context, username string, listArgs *services.ListArguments, resources interface{}) (*api.PagingMeta, *errors.ServiceError) { + return nil, nil +} + +func (m *mockGenericService) Update(ctx context.Context, username string, resource interface{}) *errors.ServiceError { + return nil +} + +func (m *mockGenericService) Delete(ctx context.Context, username string, resource interface{}) *errors.ServiceError { + return nil +} + +func TestClusterNodePoolsHandler_Get(t *testing.T) { + RegisterTestingT(t) + + now := time.Now() + clusterID := "test-cluster-123" + nodePoolID := "test-nodepool-456" + + tests := []struct { + name string + clusterID string + nodePoolID string + mockClusterFunc func(ctx context.Context, id string) (*api.Cluster, *errors.ServiceError) + mockNodePoolFunc func(ctx context.Context, id string) (*api.NodePool, *errors.ServiceError) + expectedStatusCode int + expectedError bool + }{ + { + name: "Success - Get nodepool by cluster and nodepool ID", + clusterID: clusterID, + nodePoolID: nodePoolID, + mockClusterFunc: func(ctx context.Context, id string) (*api.Cluster, *errors.ServiceError) { + return &api.Cluster{ + Meta: api.Meta{ + ID: clusterID, + CreatedAt: now, + UpdatedAt: now, + }, + Name: "test-cluster", + }, nil + }, + mockNodePoolFunc: func(ctx context.Context, id string) (*api.NodePool, *errors.ServiceError) { + return &api.NodePool{ + Meta: api.Meta{ + ID: nodePoolID, + CreatedAt: now, + UpdatedAt: now, + }, + Kind: "NodePool", + Name: "test-nodepool", + OwnerID: clusterID, + }, nil + }, + expectedStatusCode: http.StatusOK, + expectedError: false, + }, + { + name: "Error - Cluster not found", + clusterID: "non-existent", + nodePoolID: nodePoolID, + mockClusterFunc: func(ctx context.Context, id string) (*api.Cluster, *errors.ServiceError) { + return nil, errors.NotFound("Cluster not found") + }, + mockNodePoolFunc: func(ctx context.Context, id string) (*api.NodePool, *errors.ServiceError) { + return nil, nil + }, + expectedStatusCode: http.StatusNotFound, + expectedError: true, + }, + { + name: "Error - NodePool not found", + clusterID: clusterID, + nodePoolID: "non-existent", + mockClusterFunc: func(ctx context.Context, id string) (*api.Cluster, *errors.ServiceError) { + return &api.Cluster{ + Meta: api.Meta{ + ID: clusterID, + CreatedAt: now, + UpdatedAt: now, + }, + Name: "test-cluster", + }, nil + }, + mockNodePoolFunc: func(ctx context.Context, id string) (*api.NodePool, *errors.ServiceError) { + return nil, errors.NotFound("NodePool not found") + }, + expectedStatusCode: http.StatusNotFound, + expectedError: true, + }, + { + name: "Error - NodePool belongs to different cluster", + clusterID: clusterID, + nodePoolID: nodePoolID, + mockClusterFunc: func(ctx context.Context, id string) (*api.Cluster, *errors.ServiceError) { + return &api.Cluster{ + Meta: api.Meta{ + ID: clusterID, + CreatedAt: now, + UpdatedAt: now, + }, + Name: "test-cluster", + }, nil + }, + mockNodePoolFunc: func(ctx context.Context, id string) (*api.NodePool, *errors.ServiceError) { + return &api.NodePool{ + Meta: api.Meta{ + ID: nodePoolID, + CreatedAt: now, + UpdatedAt: now, + }, + Kind: "NodePool", + Name: "test-nodepool", + OwnerID: "different-cluster-789", // Different cluster + }, nil + }, + expectedStatusCode: http.StatusNotFound, + expectedError: true, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + RegisterTestingT(t) + + // Create mock services + mockClusterSvc := &mockClusterService{getFunc: tt.mockClusterFunc} + mockNodePoolSvc := &mockNodePoolService{getFunc: tt.mockNodePoolFunc} + mockGenericSvc := &mockGenericService{} + + // Create handler + handler := NewClusterNodePoolsHandler(mockClusterSvc, mockNodePoolSvc, mockGenericSvc) + + // Create request + req := httptest.NewRequest(http.MethodGet, "/api/hyperfleet/v1/clusters/"+tt.clusterID+"/nodepools/"+tt.nodePoolID, nil) + req = mux.SetURLVars(req, map[string]string{ + "id": tt.clusterID, + "nodepool_id": tt.nodePoolID, + }) + + // Create response recorder + rr := httptest.NewRecorder() + + // Call handler + handler.Get(rr, req) + + // Check status code + Expect(rr.Code).To(Equal(tt.expectedStatusCode)) + + if !tt.expectedError { + // Parse response + var response openapi.NodePool + err := json.Unmarshal(rr.Body.Bytes(), &response) + Expect(err).NotTo(HaveOccurred()) + Expect(*response.Id).To(Equal(nodePoolID)) + Expect(response.Kind).NotTo(BeNil()) + Expect(*response.Kind).To(Equal("NodePool")) + } + }) + } +} diff --git a/pkg/handlers/cluster_status.go b/pkg/handlers/cluster_status.go new file mode 100644 index 0000000..b45eb49 --- /dev/null +++ b/pkg/handlers/cluster_status.go @@ -0,0 +1,119 @@ +package handlers + +import ( + "encoding/json" + "net/http" + + "github.com/gorilla/mux" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/services" +) + +type clusterStatusHandler struct { + adapterStatusService services.AdapterStatusService + clusterService services.ClusterService +} + +func NewClusterStatusHandler(adapterStatusService services.AdapterStatusService, clusterService services.ClusterService) *clusterStatusHandler { + return &clusterStatusHandler{ + adapterStatusService: adapterStatusService, + clusterService: clusterService, + } +} + +// List returns all adapter statuses for a cluster with pagination +func (h clusterStatusHandler) List(w http.ResponseWriter, r *http.Request) { + cfg := &handlerConfig{ + Action: func() (interface{}, *errors.ServiceError) { + ctx := r.Context() + clusterID := mux.Vars(r)["id"] + listArgs := services.NewListArguments(r.URL.Query()) + + // Fetch adapter statuses with pagination + adapterStatuses, total, err := h.adapterStatusService.FindByResourcePaginated(ctx, "Cluster", clusterID, listArgs) + if err != nil { + return nil, err + } + + // Convert to OpenAPI models + items := make([]openapi.AdapterStatus, 0, len(adapterStatuses)) + for _, as := range adapterStatuses { + items = append(items, *as.ToOpenAPI()) + } + + // Return list response with pagination metadata + response := openapi.AdapterStatusList{ + Kind: "AdapterStatusList", + Items: items, + Page: int32(listArgs.Page), + Size: int32(len(items)), + Total: int32(total), + } + + return response, nil + }, + } + + handleList(w, r, cfg) +} + +// Create creates or updates an adapter status for a cluster +func (h clusterStatusHandler) Create(w http.ResponseWriter, r *http.Request) { + var req openapi.AdapterStatusCreateRequest + + cfg := &handlerConfig{ + &req, + []validate{ + validateNotEmpty(&req, "Adapter", "adapter"), + }, + func() (interface{}, *errors.ServiceError) { + ctx := r.Context() + clusterID := mux.Vars(r)["id"] + + // Verify cluster exists + _, err := h.clusterService.Get(ctx, clusterID) + if err != nil { + return nil, err + } + + // Check if adapter status already exists + existing, _ := h.adapterStatusService.FindByResourceAndAdapter(ctx, "Cluster", clusterID, req.Adapter) + + var adapterStatus *api.AdapterStatus + if existing != nil { + // Update existing + existing.ObservedGeneration = req.ObservedGeneration + conditionsJSON, _ := json.Marshal(req.Conditions) + existing.Conditions = conditionsJSON + if req.Data != nil { + dataJSON, _ := json.Marshal(req.Data) + existing.Data = dataJSON + } + adapterStatus, err = h.adapterStatusService.Replace(ctx, existing) + } else { + // Create new + newStatus := api.AdapterStatusFromOpenAPICreate("Cluster", clusterID, &req) + adapterStatus, err = h.adapterStatusService.Create(ctx, newStatus) + } + + if err != nil { + return nil, err + } + + // Trigger status aggregation + _, aggregateErr := h.clusterService.UpdateClusterStatusFromAdapters(ctx, clusterID) + if aggregateErr != nil { + // Log error but don't fail the request + // The status will be computed on next update + } + + return adapterStatus.ToOpenAPI(), nil + }, + handleError, + } + + handle(w, r, cfg, http.StatusCreated) +} diff --git a/pkg/handlers/compatibility.go b/pkg/handlers/compatibility.go new file mode 100644 index 0000000..3626c86 --- /dev/null +++ b/pkg/handlers/compatibility.go @@ -0,0 +1,34 @@ +package handlers + +import ( + "net/http" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +type compatibilityHandler struct{} + +func NewCompatibilityHandler() *compatibilityHandler { + return &compatibilityHandler{} +} + +// Get returns API compatibility information +func (h compatibilityHandler) Get(w http.ResponseWriter, r *http.Request) { + cfg := &handlerConfig{ + Action: func() (interface{}, *errors.ServiceError) { + response := map[string]interface{}{ + "api_version": "v1", + "compatible": true, + "features": []string{ + "clusters", + "nodepools", + "adapter_status", + "status_aggregation", + }, + } + return response, nil + }, + } + + handleGet(w, r, cfg) +} diff --git a/pkg/handlers/framework.go b/pkg/handlers/framework.go new file mode 100755 index 0000000..e01995b --- /dev/null +++ b/pkg/handlers/framework.go @@ -0,0 +1,128 @@ +package handlers + +import ( + "context" + "encoding/json" + "io" + "net/http" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" +) + +// handlerConfig defines the common things each REST controller must do. +// The corresponding handle() func runs the basic handlerConfig. +// This is not meant to be an HTTP framework or anything larger than simple CRUD in handlers. +// +// MarshalInto is a pointer to the object to hold the unmarshaled JSON. +// Validate is a list of validation function that run in order, returning fast on the first error. +// Action is the specific logic a handler must take (e.g, find an object, save an object) +// ErrorHandler is the way errors are returned to the client +type handlerConfig struct { + MarshalInto interface{} + Validate []validate + Action httpAction + ErrorHandler errorHandlerFunc +} + +type validate func() *errors.ServiceError +type errorHandlerFunc func(ctx context.Context, w http.ResponseWriter, err *errors.ServiceError) +type httpAction func() (interface{}, *errors.ServiceError) + +func handleError(ctx context.Context, w http.ResponseWriter, err *errors.ServiceError) { + log := logger.NewOCMLogger(ctx) + operationID := logger.GetOperationID(ctx) + // If this is a 400 error, its the user's issue, log as info rather than error + if err.HttpCode >= 400 && err.HttpCode <= 499 { + log.Infof(err.Error()) + } else { + log.Error(err.Error()) + } + writeJSONResponse(w, err.HttpCode, err.AsOpenapiError(operationID)) +} + +func handle(w http.ResponseWriter, r *http.Request, cfg *handlerConfig, httpStatus int) { + if cfg.ErrorHandler == nil { + cfg.ErrorHandler = handleError + } + + bytes, err := io.ReadAll(r.Body) + if err != nil { + handleError(r.Context(), w, errors.MalformedRequest("Unable to read request body: %s", err)) + return + } + + err = json.Unmarshal(bytes, &cfg.MarshalInto) + if err != nil { + handleError(r.Context(), w, errors.MalformedRequest("Invalid request format: %s", err)) + return + } + + for _, v := range cfg.Validate { + err := v() + if err != nil { + cfg.ErrorHandler(r.Context(), w, err) + return + } + } + + result, serviceErr := cfg.Action() + + switch { + case serviceErr != nil: + cfg.ErrorHandler(r.Context(), w, serviceErr) + default: + writeJSONResponse(w, httpStatus, result) + } + +} + +func handleDelete(w http.ResponseWriter, r *http.Request, cfg *handlerConfig, httpStatus int) { + if cfg.ErrorHandler == nil { + cfg.ErrorHandler = handleError + } + for _, v := range cfg.Validate { + err := v() + if err != nil { + cfg.ErrorHandler(r.Context(), w, err) + return + } + } + + result, serviceErr := cfg.Action() + + switch { + case serviceErr != nil: + cfg.ErrorHandler(r.Context(), w, serviceErr) + default: + writeJSONResponse(w, httpStatus, result) + } + +} + +func handleGet(w http.ResponseWriter, r *http.Request, cfg *handlerConfig) { + if cfg.ErrorHandler == nil { + cfg.ErrorHandler = handleError + } + + result, serviceErr := cfg.Action() + switch { + case serviceErr == nil: + writeJSONResponse(w, http.StatusOK, result) + default: + cfg.ErrorHandler(r.Context(), w, serviceErr) + } +} + +func handleList(w http.ResponseWriter, r *http.Request, cfg *handlerConfig) { + if cfg.ErrorHandler == nil { + cfg.ErrorHandler = handleError + } + + results, serviceError := cfg.Action() + if serviceError != nil { + cfg.ErrorHandler(r.Context(), w, serviceError) + return + } + writeJSONResponse(w, http.StatusOK, results) +} diff --git a/pkg/handlers/framework_test.go b/pkg/handlers/framework_test.go new file mode 100755 index 0000000..8672fed --- /dev/null +++ b/pkg/handlers/framework_test.go @@ -0,0 +1,19 @@ +package handlers + +import "net/http" + +type mockResponseWriter struct { + written string + status int +} + +func (m *mockResponseWriter) Header() http.Header { + return map[string][]string{} +} +func (m *mockResponseWriter) Write(b []byte) (int, error) { + m.written = string(b) + return 0, nil +} +func (m *mockResponseWriter) WriteHeader(code int) { + m.status = code +} diff --git a/pkg/handlers/helpers.go b/pkg/handlers/helpers.go new file mode 100755 index 0000000..85ba934 --- /dev/null +++ b/pkg/handlers/helpers.go @@ -0,0 +1,38 @@ +package handlers + +import ( + "encoding/json" + "net/http" + "reflect" +) + +func writeJSONResponse(w http.ResponseWriter, code int, payload interface{}) { + w.Header().Set("Content-Type", "application/json") + // By default, decide whether or not a cache is usable based on the matching of the JWT + // For example, this will keep caches from being used in the same browser if two users were to log in back to back + w.Header().Set("Vary", "Authorization") + + w.WriteHeader(code) + + if payload != nil { + response, _ := json.Marshal(payload) + _, _ = w.Write(response) + } +} + +// Prepare a 'list' of non-db-backed resources +func determineListRange(obj interface{}, page int, size int64) (list []interface{}, total int64) { + items := reflect.ValueOf(obj) + total = int64(items.Len()) + low := int64(page-1) * size + high := low + size + if low < 0 || low >= total || high >= total { + low = 0 + high = total + } + for i := low; i < high; i++ { + list = append(list, items.Index(int(i)).Interface()) + } + + return list, total +} diff --git a/pkg/handlers/metadata.go b/pkg/handlers/metadata.go new file mode 100755 index 0000000..0960cbf --- /dev/null +++ b/pkg/handlers/metadata.go @@ -0,0 +1,60 @@ +/* +Copyright (c) 2018 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package handlers + +import ( + "encoding/json" + "fmt" + "net/http" + + "github.com/golang/glog" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" +) + +type metadataHandler struct{} + +func NewMetadataHandler() *metadataHandler { + return &metadataHandler{} +} + +// Get sends API documentation response. +func (h metadataHandler) Get(w http.ResponseWriter, r *http.Request) { + // Set the content type: + w.Header().Set("Content-Type", "application/json") + + // Prepare the body: + body := api.Metadata{ + ID: "hyperfleet", + Kind: "API", + HREF: r.URL.Path, + Version: api.Version, + BuildTime: api.BuildTime, + } + data, err := json.Marshal(body) + if err != nil { + api.SendPanic(w, r) + return + } + + // Send the response: + _, err = w.Write(data) + if err != nil { + err = fmt.Errorf("can't send response body for request '%s'", r.URL.Path) + glog.Error(err) + return + } +} diff --git a/pkg/handlers/node_pool.go b/pkg/handlers/node_pool.go new file mode 100644 index 0000000..1057337 --- /dev/null +++ b/pkg/handlers/node_pool.go @@ -0,0 +1,163 @@ +package handlers + +import ( + "encoding/json" + "net/http" + + "github.com/gorilla/mux" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/presenters" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/services" +) + +var _ RestHandler = nodePoolHandler{} + +type nodePoolHandler struct { + nodePool services.NodePoolService + generic services.GenericService +} + +func NewNodePoolHandler(nodePool services.NodePoolService, generic services.GenericService) *nodePoolHandler { + return &nodePoolHandler{ + nodePool: nodePool, + generic: generic, + } +} + +func (h nodePoolHandler) Create(w http.ResponseWriter, r *http.Request) { + var req openapi.NodePoolCreateRequest + cfg := &handlerConfig{ + &req, + []validate{ + validateEmpty(&req, "Id", "id"), + }, + func() (interface{}, *errors.ServiceError) { + ctx := r.Context() + // For standalone nodepools, owner_id would need to come from somewhere + // This is likely not a supported use case, but using empty string for now + nodePoolModel := api.NodePoolFromOpenAPICreate(&req, "", "system") + nodePoolModel, err := h.nodePool.Create(ctx, nodePoolModel) + if err != nil { + return nil, err + } + return presenters.PresentNodePool(nodePoolModel), nil + }, + handleError, + } + + handle(w, r, cfg, http.StatusCreated) +} + +func (h nodePoolHandler) Patch(w http.ResponseWriter, r *http.Request) { + var patch api.NodePoolPatchRequest + + cfg := &handlerConfig{ + &patch, + []validate{}, + func() (interface{}, *errors.ServiceError) { + ctx := r.Context() + id := mux.Vars(r)["id"] + found, err := h.nodePool.Get(ctx, id) + if err != nil { + return nil, err + } + + //patch a field + if patch.Name != nil { + found.Name = *patch.Name + } + if patch.Spec != nil { + specJSON, _ := json.Marshal(*patch.Spec) + found.Spec = specJSON + } + // Note: OwnerID should not be changed after creation + // if patch.OwnerID != nil { + // found.OwnerID = *patch.OwnerID + // } + + nodePoolModel, err := h.nodePool.Replace(ctx, found) + if err != nil { + return nil, err + } + return presenters.PresentNodePool(nodePoolModel), nil + }, + handleError, + } + + handle(w, r, cfg, http.StatusOK) +} + +func (h nodePoolHandler) List(w http.ResponseWriter, r *http.Request) { + cfg := &handlerConfig{ + Action: func() (interface{}, *errors.ServiceError) { + ctx := r.Context() + + listArgs := services.NewListArguments(r.URL.Query()) + var nodePools []api.NodePool + paging, err := h.generic.List(ctx, "username", listArgs, &nodePools) + if err != nil { + return nil, err + } + // Build list response manually since there's no NodePoolList in OpenAPI + items := make([]openapi.NodePool, 0, len(nodePools)) + + for _, nodePool := range nodePools { + converted := presenters.PresentNodePool(&nodePool) + items = append(items, converted) + } + + nodePoolList := struct { + Kind string `json:"kind"` + Page int32 `json:"page"` + Size int32 `json:"size"` + Total int32 `json:"total"` + Items []openapi.NodePool `json:"items"` + }{ + Kind: "NodePoolList", + Page: int32(paging.Page), + Size: int32(paging.Size), + Total: int32(paging.Total), + Items: items, + } + if listArgs.Fields != nil { + filteredItems, err := presenters.SliceFilter(listArgs.Fields, nodePoolList.Items) + if err != nil { + return nil, err + } + return filteredItems, nil + } + return nodePoolList, nil + }, + } + + handleList(w, r, cfg) +} + +func (h nodePoolHandler) Get(w http.ResponseWriter, r *http.Request) { + cfg := &handlerConfig{ + Action: func() (interface{}, *errors.ServiceError) { + id := mux.Vars(r)["id"] + ctx := r.Context() + nodePool, err := h.nodePool.Get(ctx, id) + if err != nil { + return nil, err + } + + return presenters.PresentNodePool(nodePool), nil + }, + } + + handleGet(w, r, cfg) +} + +func (h nodePoolHandler) Delete(w http.ResponseWriter, r *http.Request) { + cfg := &handlerConfig{ + Action: func() (interface{}, *errors.ServiceError) { + return nil, errors.NotImplemented("delete") + }, + } + handleDelete(w, r, cfg, http.StatusNoContent) +} diff --git a/pkg/handlers/nodepool_status.go b/pkg/handlers/nodepool_status.go new file mode 100644 index 0000000..582770f --- /dev/null +++ b/pkg/handlers/nodepool_status.go @@ -0,0 +1,119 @@ +package handlers + +import ( + "encoding/json" + "net/http" + + "github.com/gorilla/mux" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/services" +) + +type nodePoolStatusHandler struct { + adapterStatusService services.AdapterStatusService + nodePoolService services.NodePoolService +} + +func NewNodePoolStatusHandler(adapterStatusService services.AdapterStatusService, nodePoolService services.NodePoolService) *nodePoolStatusHandler { + return &nodePoolStatusHandler{ + adapterStatusService: adapterStatusService, + nodePoolService: nodePoolService, + } +} + +// List returns all adapter statuses for a nodepool with pagination +func (h nodePoolStatusHandler) List(w http.ResponseWriter, r *http.Request) { + cfg := &handlerConfig{ + Action: func() (interface{}, *errors.ServiceError) { + ctx := r.Context() + nodePoolID := mux.Vars(r)["nodepool_id"] + listArgs := services.NewListArguments(r.URL.Query()) + + // Fetch adapter statuses with pagination + adapterStatuses, total, err := h.adapterStatusService.FindByResourcePaginated(ctx, "NodePool", nodePoolID, listArgs) + if err != nil { + return nil, err + } + + // Convert to OpenAPI models + items := make([]openapi.AdapterStatus, 0, len(adapterStatuses)) + for _, as := range adapterStatuses { + items = append(items, *as.ToOpenAPI()) + } + + // Return list response with pagination metadata + response := openapi.AdapterStatusList{ + Kind: "AdapterStatusList", + Items: items, + Page: int32(listArgs.Page), + Size: int32(len(items)), + Total: int32(total), + } + + return response, nil + }, + } + + handleList(w, r, cfg) +} + +// Create creates or updates an adapter status for a nodepool +func (h nodePoolStatusHandler) Create(w http.ResponseWriter, r *http.Request) { + var req openapi.AdapterStatusCreateRequest + + cfg := &handlerConfig{ + &req, + []validate{ + validateNotEmpty(&req, "Adapter", "adapter"), + }, + func() (interface{}, *errors.ServiceError) { + ctx := r.Context() + nodePoolID := mux.Vars(r)["nodepool_id"] + + // Verify nodepool exists + _, err := h.nodePoolService.Get(ctx, nodePoolID) + if err != nil { + return nil, err + } + + // Check if adapter status already exists + existing, _ := h.adapterStatusService.FindByResourceAndAdapter(ctx, "NodePool", nodePoolID, req.Adapter) + + var adapterStatus *api.AdapterStatus + if existing != nil { + // Update existing + existing.ObservedGeneration = req.ObservedGeneration + conditionsJSON, _ := json.Marshal(req.Conditions) + existing.Conditions = conditionsJSON + if req.Data != nil { + dataJSON, _ := json.Marshal(req.Data) + existing.Data = dataJSON + } + adapterStatus, err = h.adapterStatusService.Replace(ctx, existing) + } else { + // Create new + newStatus := api.AdapterStatusFromOpenAPICreate("NodePool", nodePoolID, &req) + adapterStatus, err = h.adapterStatusService.Create(ctx, newStatus) + } + + if err != nil { + return nil, err + } + + // Trigger status aggregation + _, aggregateErr := h.nodePoolService.UpdateNodePoolStatusFromAdapters(ctx, nodePoolID) + if aggregateErr != nil { + // Log error but don't fail the request + // The status will be computed on next update + } + + return adapterStatus.ToOpenAPI(), nil + }, + handleError, + } + + handle(w, r, cfg, http.StatusCreated) +} diff --git a/pkg/handlers/openapi-ui.html b/pkg/handlers/openapi-ui.html new file mode 100755 index 0000000..ed31ce8 --- /dev/null +++ b/pkg/handlers/openapi-ui.html @@ -0,0 +1,26 @@ + + + + + + + SwaggerUI + + + +
+ + + + + diff --git a/pkg/handlers/openapi.go b/pkg/handlers/openapi.go new file mode 100755 index 0000000..dd77bd5 --- /dev/null +++ b/pkg/handlers/openapi.go @@ -0,0 +1,68 @@ +package handlers + +import ( + "embed" + "io/fs" + "net/http" + + "github.com/ghodss/yaml" + "github.com/golang/glog" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +//go:embed openapi-ui.html +var openapiui embed.FS + +type openAPIHandler struct { + openAPIDefinitions []byte + uiContent []byte +} + +func NewOpenAPIHandler() (*openAPIHandler, error) { + // Load the fully resolved OpenAPI spec from embedded filesystem + resolvedData, err := api.GetOpenAPISpec() + if err != nil { + return nil, errors.GeneralError( + "can't load OpenAPI specification from embedded file: %v", + err, + ) + } + + // Convert YAML to JSON + data, err := yaml.YAMLToJSON(resolvedData) + if err != nil { + return nil, errors.GeneralError( + "can't convert OpenAPI specification from YAML to JSON: %v", + err, + ) + } + glog.Info("Loaded fully resolved OpenAPI specification from embedded pkg/api/openapi/api/openapi.yaml") + + // Load the OpenAPI UI HTML content + uiContent, err := fs.ReadFile(openapiui, "openapi-ui.html") + if err != nil { + return nil, errors.GeneralError( + "can't load OpenAPI UI HTML from embedded file: %v", + err, + ) + } + glog.Info("Loaded OpenAPI UI HTML from embedded file") + + return &openAPIHandler{ + openAPIDefinitions: data, + uiContent: uiContent, + }, nil +} + +func (h *openAPIHandler) GetOpenAPI(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "application/json") + w.WriteHeader(http.StatusOK) + _, _ = w.Write(h.openAPIDefinitions) +} + +func (h *openAPIHandler) GetOpenAPIUI(w http.ResponseWriter, r *http.Request) { + w.Header().Set("Content-Type", "text/html") + w.WriteHeader(http.StatusOK) + _, _ = w.Write(h.uiContent) +} diff --git a/pkg/handlers/prometheus_metrics.go b/pkg/handlers/prometheus_metrics.go new file mode 100755 index 0000000..496a3cd --- /dev/null +++ b/pkg/handlers/prometheus_metrics.go @@ -0,0 +1,23 @@ +package handlers + +import ( + "net/http" + + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +type prometheusMetricsHandler struct { +} + +// NewPrometheusMetricsHandler adds custom metrics and proxy to prometheus handler +func NewPrometheusMetricsHandler() *prometheusMetricsHandler { + return &prometheusMetricsHandler{} +} + +func (h *prometheusMetricsHandler) Handler() http.Handler { + handler := promhttp.Handler() + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + handler.ServeHTTP(w, r) + }) +} diff --git a/pkg/handlers/rest.go b/pkg/handlers/rest.go new file mode 100755 index 0000000..63c0556 --- /dev/null +++ b/pkg/handlers/rest.go @@ -0,0 +1,11 @@ +package handlers + +import "net/http" + +type RestHandler interface { + List(w http.ResponseWriter, r *http.Request) + Get(w http.ResponseWriter, r *http.Request) + Create(w http.ResponseWriter, r *http.Request) + Patch(w http.ResponseWriter, r *http.Request) + Delete(w http.ResponseWriter, r *http.Request) +} diff --git a/pkg/handlers/validation.go b/pkg/handlers/validation.go new file mode 100755 index 0000000..90a319d --- /dev/null +++ b/pkg/handlers/validation.go @@ -0,0 +1,55 @@ +package handlers + +import ( + "reflect" + "strings" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +func validateNotEmpty(i interface{}, fieldName string, field string) validate { + return func() *errors.ServiceError { + value := reflect.ValueOf(i).Elem().FieldByName(fieldName) + if value.Kind() == reflect.Ptr { + if value.IsNil() { + return errors.Validation("%s is required", field) + } + value = value.Elem() + } + if len(value.String()) == 0 { + return errors.Validation("%s is required", field) + } + return nil + } +} + +func validateEmpty(i interface{}, fieldName string, field string) validate { + return func() *errors.ServiceError { + value := reflect.ValueOf(i).Elem().FieldByName(fieldName) + if value.Kind() == reflect.Ptr { + if value.IsNil() { + return nil + } + value = value.Elem() + } + if len(value.String()) != 0 { + return errors.Validation("%s must be empty", field) + } + return nil + } +} + +// Note that because this uses strings.EqualFold, it is case-insensitive +func validateInclusionIn(value *string, list []string, category *string) validate { + return func() *errors.ServiceError { + for _, item := range list { + if strings.EqualFold(*value, item) { + return nil + } + } + if category == nil { + category = &[]string{"value"}[0] + } + return errors.Validation("%s is not a valid %s", *value, *category) + } +} diff --git a/pkg/logger/logger.go b/pkg/logger/logger.go new file mode 100755 index 0000000..cde256f --- /dev/null +++ b/pkg/logger/logger.go @@ -0,0 +1,128 @@ +package logger + +import ( + "context" + "fmt" + "strings" + + "github.com/golang/glog" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/util" +) + +type OCMLogger interface { + V(level int32) OCMLogger + Infof(format string, args ...interface{}) + Extra(key string, value interface{}) OCMLogger + Info(message string) + Warning(message string) + Error(message string) + Fatal(message string) +} + +var _ OCMLogger = &logger{} + +type extra map[string]interface{} + +type logger struct { + context context.Context + level int32 + accountID string + // TODO username is unused, should we be logging it? Could be pii + username string + extra extra +} + +// NewOCMLogger creates a new logger instance with a default verbosity of 1 +func NewOCMLogger(ctx context.Context) OCMLogger { + logger := &logger{ + context: ctx, + level: 1, + extra: make(extra), + accountID: util.GetAccountIDFromContext(ctx), + } + return logger +} + +func (l *logger) prepareLogPrefix(message string, extra extra) string { + prefix := " " + + if txid, ok := l.context.Value("txid").(int64); ok { + prefix = fmt.Sprintf("[tx_id=%d]%s", txid, prefix) + } + + if l.accountID != "" { + prefix = fmt.Sprintf("[accountID=%s]%s", l.accountID, prefix) + } + + if opid, ok := l.context.Value(OpIDKey).(string); ok { + prefix = fmt.Sprintf("[opid=%s]%s", opid, prefix) + } + + var args []string + for k, v := range extra { + args = append(args, fmt.Sprintf("%s=%v", k, v)) + } + + return fmt.Sprintf("%s %s %s", prefix, message, strings.Join(args, " ")) +} + +func (l *logger) prepareLogPrefixf(format string, args ...interface{}) string { + orig := fmt.Sprintf(format, args...) + prefix := " " + + if txid, ok := l.context.Value("txid").(int64); ok { + prefix = fmt.Sprintf("[tx_id=%d]%s", txid, prefix) + } + + if l.accountID != "" { + prefix = fmt.Sprintf("[accountID=%s]%s", l.accountID, prefix) + } + + if opid, ok := l.context.Value(OpIDKey).(string); ok { + prefix = fmt.Sprintf("[opid=%s]%s", opid, prefix) + } + + return fmt.Sprintf("%s%s", prefix, orig) +} + +func (l *logger) V(level int32) OCMLogger { + return &logger{ + context: l.context, + accountID: l.accountID, + username: l.username, + level: level, + } +} + +// Infof doesn't trigger Sentry error +func (l *logger) Infof(format string, args ...interface{}) { + prefixed := l.prepareLogPrefixf(format, args...) + glog.V(glog.Level(l.level)).Infof("%s", prefixed) +} + +func (l *logger) Extra(key string, value interface{}) OCMLogger { + l.extra[key] = value + return l +} + +func (l *logger) Info(message string) { + l.log(message, glog.V(glog.Level(l.level)).Infoln) +} + +func (l *logger) Warning(message string) { + l.log(message, glog.Warningln) +} + +func (l *logger) Error(message string) { + l.log(message, glog.Errorln) +} + +func (l *logger) Fatal(message string) { + l.log(message, glog.Fatalln) +} + +func (l *logger) log(message string, glogFunc func(args ...interface{})) { + prefixed := l.prepareLogPrefix(message, l.extra) + glogFunc(prefixed) +} + diff --git a/pkg/logger/operationid_middleware.go b/pkg/logger/operationid_middleware.go new file mode 100755 index 0000000..497b9ed --- /dev/null +++ b/pkg/logger/operationid_middleware.go @@ -0,0 +1,43 @@ +package logger + +import ( + "context" + "net/http" + + "github.com/segmentio/ksuid" +) + +type OperationIDKey string + +const OpIDKey OperationIDKey = "opID" +const OpIDHeader OperationIDKey = "X-Operation-ID" + +// OperationIDMiddleware Middleware wraps the given HTTP handler so that the details of the request are sent to the log. +func OperationIDMiddleware(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ctx := WithOpID(r.Context()) + + opID, ok := ctx.Value(OpIDKey).(string) + if ok && len(opID) > 0 { + w.Header().Set(string(OpIDHeader), opID) + } + + handler.ServeHTTP(w, r.WithContext(ctx)) + }) +} + +func WithOpID(ctx context.Context) context.Context { + if ctx.Value(OpIDKey) != nil { + return ctx + } + opID := ksuid.New().String() + return context.WithValue(ctx, OpIDKey, opID) +} + +// GetOperationID get operationID of the context +func GetOperationID(ctx context.Context) string { + if opID, ok := ctx.Value(OpIDKey).(string); ok { + return opID + } + return "" +} diff --git a/pkg/services/adapter_status.go b/pkg/services/adapter_status.go new file mode 100644 index 0000000..1295ffa --- /dev/null +++ b/pkg/services/adapter_status.go @@ -0,0 +1,99 @@ +package services + +import ( + "context" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/dao" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +type AdapterStatusService interface { + Get(ctx context.Context, id string) (*api.AdapterStatus, *errors.ServiceError) + Create(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, *errors.ServiceError) + Replace(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, *errors.ServiceError) + Delete(ctx context.Context, id string) *errors.ServiceError + FindByResource(ctx context.Context, resourceType, resourceID string) (api.AdapterStatusList, *errors.ServiceError) + FindByResourcePaginated(ctx context.Context, resourceType, resourceID string, listArgs *ListArguments) (api.AdapterStatusList, int64, *errors.ServiceError) + FindByResourceAndAdapter(ctx context.Context, resourceType, resourceID, adapter string) (*api.AdapterStatus, *errors.ServiceError) + All(ctx context.Context) (api.AdapterStatusList, *errors.ServiceError) +} + +func NewAdapterStatusService(adapterStatusDao dao.AdapterStatusDao) AdapterStatusService { + return &sqlAdapterStatusService{ + adapterStatusDao: adapterStatusDao, + } +} + +var _ AdapterStatusService = &sqlAdapterStatusService{} + +type sqlAdapterStatusService struct { + adapterStatusDao dao.AdapterStatusDao +} + +func (s *sqlAdapterStatusService) Get(ctx context.Context, id string) (*api.AdapterStatus, *errors.ServiceError) { + adapterStatus, err := s.adapterStatusDao.Get(ctx, id) + if err != nil { + return nil, handleGetError("AdapterStatus", "id", id, err) + } + return adapterStatus, nil +} + +func (s *sqlAdapterStatusService) Create(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, *errors.ServiceError) { + adapterStatus, err := s.adapterStatusDao.Create(ctx, adapterStatus) + if err != nil { + return nil, handleCreateError("AdapterStatus", err) + } + return adapterStatus, nil +} + +func (s *sqlAdapterStatusService) Replace(ctx context.Context, adapterStatus *api.AdapterStatus) (*api.AdapterStatus, *errors.ServiceError) { + adapterStatus, err := s.adapterStatusDao.Replace(ctx, adapterStatus) + if err != nil { + return nil, handleUpdateError("AdapterStatus", err) + } + return adapterStatus, nil +} + +func (s *sqlAdapterStatusService) Delete(ctx context.Context, id string) *errors.ServiceError { + if err := s.adapterStatusDao.Delete(ctx, id); err != nil { + return handleDeleteError("AdapterStatus", errors.GeneralError("Unable to delete adapter status: %s", err)) + } + return nil +} + +func (s *sqlAdapterStatusService) FindByResource(ctx context.Context, resourceType, resourceID string) (api.AdapterStatusList, *errors.ServiceError) { + statuses, err := s.adapterStatusDao.FindByResource(ctx, resourceType, resourceID) + if err != nil { + return nil, errors.GeneralError("Unable to get adapter statuses: %s", err) + } + return statuses, nil +} + +func (s *sqlAdapterStatusService) FindByResourcePaginated(ctx context.Context, resourceType, resourceID string, listArgs *ListArguments) (api.AdapterStatusList, int64, *errors.ServiceError) { + offset := (listArgs.Page - 1) * int(listArgs.Size) + limit := int(listArgs.Size) + + statuses, total, err := s.adapterStatusDao.FindByResourcePaginated(ctx, resourceType, resourceID, offset, limit) + if err != nil { + return nil, 0, errors.GeneralError("Unable to get adapter statuses: %s", err) + } + + return statuses, total, nil +} + +func (s *sqlAdapterStatusService) FindByResourceAndAdapter(ctx context.Context, resourceType, resourceID, adapter string) (*api.AdapterStatus, *errors.ServiceError) { + status, err := s.adapterStatusDao.FindByResourceAndAdapter(ctx, resourceType, resourceID, adapter) + if err != nil { + return nil, handleGetError("AdapterStatus", "adapter", adapter, err) + } + return status, nil +} + +func (s *sqlAdapterStatusService) All(ctx context.Context) (api.AdapterStatusList, *errors.ServiceError) { + statuses, err := s.adapterStatusDao.All(ctx) + if err != nil { + return nil, errors.GeneralError("Unable to get all adapter statuses: %s", err) + } + return statuses, nil +} diff --git a/pkg/services/cluster.go b/pkg/services/cluster.go new file mode 100644 index 0000000..466fdc6 --- /dev/null +++ b/pkg/services/cluster.go @@ -0,0 +1,220 @@ +package services + +import ( + "context" + "encoding/json" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/dao" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" +) + +type ClusterService interface { + Get(ctx context.Context, id string) (*api.Cluster, *errors.ServiceError) + Create(ctx context.Context, cluster *api.Cluster) (*api.Cluster, *errors.ServiceError) + Replace(ctx context.Context, cluster *api.Cluster) (*api.Cluster, *errors.ServiceError) + Delete(ctx context.Context, id string) *errors.ServiceError + All(ctx context.Context) (api.ClusterList, *errors.ServiceError) + + FindByIDs(ctx context.Context, ids []string) (api.ClusterList, *errors.ServiceError) + + // Status aggregation + UpdateClusterStatusFromAdapters(ctx context.Context, clusterID string) (*api.Cluster, *errors.ServiceError) + + // idempotent functions for the control plane, but can also be called synchronously by any actor + OnUpsert(ctx context.Context, id string) error + OnDelete(ctx context.Context, id string) error +} + +func NewClusterService(clusterDao dao.ClusterDao, adapterStatusDao dao.AdapterStatusDao) ClusterService { + return &sqlClusterService{ + clusterDao: clusterDao, + adapterStatusDao: adapterStatusDao, + } +} + +var _ ClusterService = &sqlClusterService{} + +type sqlClusterService struct { + clusterDao dao.ClusterDao + adapterStatusDao dao.AdapterStatusDao +} + +func (s *sqlClusterService) Get(ctx context.Context, id string) (*api.Cluster, *errors.ServiceError) { + cluster, err := s.clusterDao.Get(ctx, id) + if err != nil { + return nil, handleGetError("Cluster", "id", id, err) + } + return cluster, nil +} + +func (s *sqlClusterService) Create(ctx context.Context, cluster *api.Cluster) (*api.Cluster, *errors.ServiceError) { + cluster, err := s.clusterDao.Create(ctx, cluster) + if err != nil { + return nil, handleCreateError("Cluster", err) + } + + // REMOVED: Event creation - no event-driven components + return cluster, nil +} + +func (s *sqlClusterService) Replace(ctx context.Context, cluster *api.Cluster) (*api.Cluster, *errors.ServiceError) { + cluster, err := s.clusterDao.Replace(ctx, cluster) + if err != nil { + return nil, handleUpdateError("Cluster", err) + } + + // REMOVED: Event creation - no event-driven components + return cluster, nil +} + +func (s *sqlClusterService) Delete(ctx context.Context, id string) *errors.ServiceError { + if err := s.clusterDao.Delete(ctx, id); err != nil { + return handleDeleteError("Cluster", errors.GeneralError("Unable to delete cluster: %s", err)) + } + + // REMOVED: Event creation - no event-driven components + return nil +} + +func (s *sqlClusterService) FindByIDs(ctx context.Context, ids []string) (api.ClusterList, *errors.ServiceError) { + clusters, err := s.clusterDao.FindByIDs(ctx, ids) + if err != nil { + return nil, errors.GeneralError("Unable to get all clusters: %s", err) + } + return clusters, nil +} + +func (s *sqlClusterService) All(ctx context.Context) (api.ClusterList, *errors.ServiceError) { + clusters, err := s.clusterDao.All(ctx) + if err != nil { + return nil, errors.GeneralError("Unable to get all clusters: %s", err) + } + return clusters, nil +} + +func (s *sqlClusterService) OnUpsert(ctx context.Context, id string) error { + logger := logger.NewOCMLogger(ctx) + + cluster, err := s.clusterDao.Get(ctx, id) + if err != nil { + return err + } + + logger.Infof("Do idempotent somethings with this cluster: %s", cluster.ID) + + return nil +} + +func (s *sqlClusterService) OnDelete(ctx context.Context, id string) error { + logger := logger.NewOCMLogger(ctx) + logger.Infof("This cluster has been deleted: %s", id) + return nil +} + +// UpdateClusterStatusFromAdapters aggregates adapter statuses into cluster status +func (s *sqlClusterService) UpdateClusterStatusFromAdapters(ctx context.Context, clusterID string) (*api.Cluster, *errors.ServiceError) { + // Get the cluster + cluster, err := s.clusterDao.Get(ctx, clusterID) + if err != nil { + return nil, handleGetError("Cluster", "id", clusterID, err) + } + + // Get all adapter statuses for this cluster + adapterStatuses, err := s.adapterStatusDao.FindByResource(ctx, "Cluster", clusterID) + if err != nil { + return nil, errors.GeneralError("Failed to get adapter statuses: %s", err) + } + + // Build the list of ConditionAvailable + adapters := []openapi.ConditionAvailable{} + allReady := true + anyFailed := false + maxObservedGeneration := int32(0) + + for _, adapterStatus := range adapterStatuses { + // Unmarshal Conditions from JSONB + var conditions []openapi.Condition + if err := json.Unmarshal(adapterStatus.Conditions, &conditions); err != nil { + continue // Skip if can't unmarshal + } + + // Find the "Available" condition + var availableCondition *openapi.Condition + for i := range conditions { + if conditions[i].Type == "Available" { + availableCondition = &conditions[i] + break + } + } + + if availableCondition == nil { + // No Available condition means adapter is not ready + allReady = false + continue + } + + // Convert to ConditionAvailable + condAvail := openapi.ConditionAvailable{ + Type: availableCondition.Type, + Adapter: adapterStatus.Adapter, + Status: availableCondition.Status, + Reason: availableCondition.Reason, + Message: availableCondition.Message, + ObservedGeneration: availableCondition.ObservedGeneration, + } + adapters = append(adapters, condAvail) + + // Check status + if availableCondition.Status != "True" { + allReady = false + if availableCondition.Status == "False" { + anyFailed = true + } + } + + // Track max observed generation + if adapterStatus.ObservedGeneration > maxObservedGeneration { + maxObservedGeneration = adapterStatus.ObservedGeneration + } + } + + // Compute overall phase + phase := "NotReady" + if len(adapterStatuses) > 0 { + if allReady { + phase = "Ready" + } else if anyFailed { + phase = "Failed" + } + } + + // Update cluster status fields + now := time.Now() + cluster.StatusPhase = phase + cluster.StatusObservedGeneration = maxObservedGeneration + + // Marshal adapters to JSON + adaptersJSON, err := json.Marshal(adapters) + if err != nil { + return nil, errors.GeneralError("Failed to marshal adapters: %s", err) + } + cluster.StatusAdapters = adaptersJSON + cluster.StatusUpdatedAt = &now + + // Update last transition time if phase changed + if cluster.StatusLastTransitionTime == nil || cluster.StatusPhase != phase { + cluster.StatusLastTransitionTime = &now + } + + // Save the updated cluster + cluster, err = s.clusterDao.Replace(ctx, cluster) + if err != nil { + return nil, handleUpdateError("Cluster", err) + } + + return cluster, nil +} diff --git a/pkg/services/generic.go b/pkg/services/generic.go new file mode 100755 index 0000000..79917d3 --- /dev/null +++ b/pkg/services/generic.go @@ -0,0 +1,346 @@ +package services + +import ( + "context" + e "errors" + "fmt" + "reflect" + "strings" + + "gorm.io/gorm" + + "github.com/Masterminds/squirrel" + "github.com/yaacov/tree-search-language/pkg/tsl" + "github.com/yaacov/tree-search-language/pkg/walkers/ident" + sqlFilter "github.com/yaacov/tree-search-language/pkg/walkers/sql" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/dao" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" +) + +type GenericService interface { + List(ctx context.Context, username string, args *ListArguments, resourceList interface{}) (*api.PagingMeta, *errors.ServiceError) +} + +func NewGenericService(genericDao dao.GenericDao) GenericService { + return &sqlGenericService{genericDao: genericDao} +} + +var _ GenericService = &sqlGenericService{} + +type sqlGenericService struct { + genericDao dao.GenericDao +} + +var ( + SearchDisallowedFields = map[string]map[string]string{} + allFieldsAllowed = map[string]string{} +) + +// wrap all needed pieces for the LIST funciton +type listContext struct { + ctx context.Context + args *ListArguments + username string + pagingMeta *api.PagingMeta + ulog *logger.OCMLogger + resourceList interface{} + disallowedFields *map[string]string + resourceType string + joins map[string]dao.TableRelation + groupBy []string + set map[string]bool +} + +func (s *sqlGenericService) newListContext(ctx context.Context, username string, args *ListArguments, resourceList interface{}) (*listContext, interface{}, *errors.ServiceError) { + log := logger.NewOCMLogger(ctx) + resourceModel := reflect.TypeOf(resourceList).Elem().Elem() + resourceTypeStr := resourceModel.Name() + if resourceTypeStr == "" { + return nil, nil, errors.GeneralError("Could not determine resource type") + } + disallowedFields := SearchDisallowedFields[resourceTypeStr] + if disallowedFields == nil { + disallowedFields = allFieldsAllowed + } + args.Search = strings.Trim(args.Search, " ") + return &listContext{ + ctx: ctx, + args: args, + username: username, + pagingMeta: &api.PagingMeta{Page: args.Page}, + ulog: &log, + resourceList: resourceList, + disallowedFields: &disallowedFields, + resourceType: resourceTypeStr, + }, reflect.New(resourceModel).Interface(), nil +} + +// List resourceList must be a pointer to a slice of database resource objects +func (s *sqlGenericService) List(ctx context.Context, username string, args *ListArguments, resourceList interface{}) (*api.PagingMeta, *errors.ServiceError) { + listCtx, model, err := s.newListContext(ctx, username, args, resourceList) + if err != nil { + return nil, err + } + + // the ordering for the sub functions matters. + builders := []listBuilder{ + // build SQL to load related resource. for now, it delegates to gorm.preload. + s.buildPreload, + + // add "ORDER BY" + s.buildOrderBy, + + // translate "search" into "WHERE"(s), and "JOIN"(s) if related resource is searched. + s.buildSearch, + + // TODO: add any custom builder functions + } + + d := s.genericDao.GetInstanceDao(ctx, model) + + // run all the "builders". they cumulatively add constructs to gorm by the context. + // it stops when a builder function raises error or signals finished. + var finished bool + for _, builderFn := range builders { + if finished, err = builderFn(listCtx, &d); err != nil { + return nil, err + } + if finished { + if err = s.loadList(listCtx, &d); err != nil { + return nil, err + } + break + } + } + return listCtx.pagingMeta, nil +} + +/*** Define all sub functions in the type of listBuilder ***/ +type listBuilder func(*listContext, *dao.GenericDao) (finished bool, err *errors.ServiceError) + +func (s *sqlGenericService) buildPreload(listCtx *listContext, d *dao.GenericDao) (bool, *errors.ServiceError) { + listCtx.set = make(map[string]bool) + + for _, preload := range listCtx.args.Preloads { + listCtx.set[preload] = true + } + // preload each table only once; struct{} doesn't occupy any additional space + for _, preload := range listCtx.args.Preloads { + (*d).Preload(preload) + } + return false, nil +} + +func (s *sqlGenericService) buildOrderBy(listCtx *listContext, d *dao.GenericDao) (bool, *errors.ServiceError) { + if len(listCtx.args.OrderBy) != 0 { + orderByArgs, serviceErr := db.ArgsToOrderBy(listCtx.args.OrderBy, *listCtx.disallowedFields) + if serviceErr != nil { + return false, serviceErr + } + for _, orderByArg := range orderByArgs { + (*d).OrderBy(orderByArg) + } + } + return false, nil +} + +func (s *sqlGenericService) buildSearchValues(listCtx *listContext, d *dao.GenericDao) (string, []any, *errors.ServiceError) { + if listCtx.args.Search == "" { + s.addJoins(listCtx, d) + return "", nil, nil + } + + // create the TSL tree + tslTree, err := tsl.ParseTSL(listCtx.args.Search) + if err != nil { + return "", nil, errors.BadRequest("Failed to parse search query: %s", listCtx.args.Search) + } + // find all related tables + tslTree, serviceErr := s.treeWalkForRelatedTables(listCtx, tslTree, d) + if serviceErr != nil { + return "", nil, serviceErr + } + // prepend table names to prevent "ambiguous" errors + tslTree, serviceErr = s.treeWalkForAddingTableName(listCtx, tslTree, d) + if serviceErr != nil { + return "", nil, serviceErr + } + // convert to sqlizer + _, sqlizer, serviceErr := s.treeWalkForSqlizer(listCtx, tslTree) + if serviceErr != nil { + return "", nil, serviceErr + } + + s.addJoins(listCtx, d) + + // parse the search string to SQL WHERE + sql, values, err := sqlizer.ToSql() + if err != nil { + return "", nil, errors.GeneralError("%s", err.Error()) + } + return sql, values, nil +} + +func (s *sqlGenericService) buildSearch(listCtx *listContext, d *dao.GenericDao) (bool, *errors.ServiceError) { + sql, values, err := s.buildSearchValues(listCtx, d) + if err != nil { + return false, err + } + (*d).Where(dao.NewWhere(sql, values)) + return true, nil +} + +// JOIN the tables that appear in the search string +func (s *sqlGenericService) addJoins(listCtx *listContext, d *dao.GenericDao) { + for _, r := range listCtx.joins { + if _, ok := listCtx.set[r.ForeignTableName]; ok { + // skip already included preloads + continue + } + sql := fmt.Sprintf( + "LEFT JOIN %s ON %s.%s = %s.%s AND %s.deleted_at IS NULL", + r.ForeignTableName, r.ForeignTableName, r.ForeignColumnName, r.TableName, r.ColumnName, r.ForeignTableName) + (*d).Joins(sql) + + listCtx.groupBy = append(listCtx.groupBy, r.ForeignTableName+".id") + listCtx.set[r.ForeignTableName] = true + } + if len(listCtx.joins) > 0 { + // Add base relation + listCtx.groupBy = append(listCtx.groupBy, (*d).GetTableName()+".id") + (*d).Group(strings.Join(listCtx.groupBy, ",")) + } + + // Reset list of joins and group by's + listCtx.joins = map[string]dao.TableRelation{} +} + +func (s *sqlGenericService) loadList(listCtx *listContext, d *dao.GenericDao) *errors.ServiceError { + args := listCtx.args + ulog := *listCtx.ulog + + (*d).Count(listCtx.resourceList, &listCtx.pagingMeta.Total) + + // Set resourceList to be an empty slice with zero capacity. Real space will be allocated by g2.Find() + if err := zeroSlice(listCtx.resourceList, 0); err != nil { + return err + } + + switch { + case args.Size > MaxListSize: + ulog.Warning("A query with a size greater than the maximum was requested.") + case args.Size < 0: + ulog.Warning("A query with an unbound size was requested.") + case args.Size == 0: + // This early return is not only performant, but also necessary. + // gorm does not support Limit(0) any longer. + ulog.Infof("A query with 0 size requested, returning early without collecting any resources from database") + return nil + } + + // NOTE: Limit no longer supports '0' size and will cause issues. There is an early return, do not remove it. + // https://github.com/go-gorm/gorm/blob/master/clause/limit.go#L18-L21 + if err := (*d).Fetch((args.Page-1)*int(args.Size), int(args.Size), listCtx.resourceList); err != nil { + if e.Is(err, gorm.ErrRecordNotFound) { + listCtx.pagingMeta.Size = 0 + } else { + return errors.GeneralError("Unable to list resources: %s", err) + } + } + listCtx.pagingMeta.Size = int64(reflect.ValueOf(listCtx.resourceList).Elem().Len()) + + return nil +} + +// Allocate a slice with size 'cap' of the type i +func zeroSlice(i interface{}, cap int64) *errors.ServiceError { + v := reflect.ValueOf(i) + if v.Kind() != reflect.Ptr { + return errors.GeneralError("A non-pointer to a list of resources: %v", v.Type()) + } + // get the value that the pointer v points to. + v = v.Elem() + if v.Kind() != reflect.Slice { + return errors.GeneralError("A non-slice list of resources") + } + v.Set(reflect.MakeSlice(v.Type(), 0, int(cap))) + return nil +} + +// walk the TSL tree looking for fields like, e.g., creator.username, and then: +// (1) look up the related table by its 1st part - creator +// (2) replace it by table name - creator.username -> accounts.username +func (s *sqlGenericService) treeWalkForRelatedTables(listCtx *listContext, tslTree tsl.Node, genericDao *dao.GenericDao) (tsl.Node, *errors.ServiceError) { + resourceTable := (*genericDao).GetTableName() + if listCtx.joins == nil { + listCtx.joins = map[string]dao.TableRelation{} + } + walkFn := func(field string) (string, error) { + fieldParts := strings.Split(field, ".") + if len(fieldParts) > 1 && fieldParts[0] != resourceTable { + fieldName := fieldParts[0] + _, exists := listCtx.joins[fieldName] + if !exists { + if relation, ok := (*genericDao).GetTableRelation(fieldName); ok { + listCtx.joins[fieldName] = relation + } else { + return field, fmt.Errorf("%s is not a related resource of %s", fieldName, listCtx.resourceType) + } + } + //replace by table name + fieldParts[0] = listCtx.joins[fieldName].ForeignTableName + return strings.Join(fieldParts, "."), nil + } + return field, nil + } + + tslTree, err := ident.Walk(tslTree, walkFn) + if err != nil { + return tslTree, errors.BadRequest("%s", err.Error()) + } + + return tslTree, nil +} + +// prepend table name to these "free" identifiers since they could cause "ambiguous" errors +func (s *sqlGenericService) treeWalkForAddingTableName(listCtx *listContext, tslTree tsl.Node, dao *dao.GenericDao) (tsl.Node, *errors.ServiceError) { + resourceTable := (*dao).GetTableName() + + walkFn := func(field string) (string, error) { + fieldParts := strings.Split(field, ".") + if len(fieldParts) == 1 { + if strings.Contains(field, "->") { + return field, nil + } + return fmt.Sprintf("%s.%s", resourceTable, field), nil + } + return field, nil + } + + tslTree, err := ident.Walk(tslTree, walkFn) + if err != nil { + return tslTree, errors.BadRequest("%s", err.Error()) + } + + return tslTree, nil +} + +func (s *sqlGenericService) treeWalkForSqlizer(listCtx *listContext, tslTree tsl.Node) (tsl.Node, squirrel.Sqlizer, *errors.ServiceError) { + // Check field names in tree + tslTree, serviceErr := db.FieldNameWalk(tslTree, *listCtx.disallowedFields) + if serviceErr != nil { + return tslTree, nil, serviceErr + } + + // Convert the search tree into SQL [Squirrel] filter + sqlizer, err := sqlFilter.Walk(tslTree) + if err != nil { + return tslTree, nil, errors.BadRequest("%s", err.Error()) + } + + return tslTree, sqlizer, nil +} diff --git a/pkg/services/generic_test.go b/pkg/services/generic_test.go new file mode 100755 index 0000000..a8cf01a --- /dev/null +++ b/pkg/services/generic_test.go @@ -0,0 +1,77 @@ +package services + +import ( + "context" + "testing" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/dao" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" + dbmocks "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db/mocks" + + "github.com/onsi/gomega/types" + "github.com/yaacov/tree-search-language/pkg/tsl" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + + . "github.com/onsi/gomega" +) + +func TestSQLTranslation(t *testing.T) { + RegisterTestingT(t) + var dbFactory db.SessionFactory = dbmocks.NewMockSessionFactory() + defer dbFactory.Close() + + g := dao.NewGenericDao(&dbFactory) + genericService := sqlGenericService{genericDao: g} + + // ill-formatted search or disallowed fields should be rejected + tests := []map[string]interface{}{ + { + "search": "garbage", + "error": "hyperfleet-21: Failed to parse search query: garbage", + }, + { + "search": "id in ('123')", + "error": "hyperfleet-21: clusters.id is not a valid field name", + }, + } + for _, test := range tests { + var list []api.Cluster + search := test["search"].(string) + errorMsg := test["error"].(string) + listCtx, model, serviceErr := genericService.newListContext(context.Background(), "", &ListArguments{Search: search}, &list) + Expect(serviceErr).ToNot(HaveOccurred()) + d := g.GetInstanceDao(context.Background(), model) + (*listCtx.disallowedFields)["id"] = "id" + _, serviceErr = genericService.buildSearch(listCtx, &d) + Expect(serviceErr).To(HaveOccurred()) + Expect(serviceErr.Code).To(Equal(errors.ErrorBadRequest)) + Expect(serviceErr.Error()).To(Equal(errorMsg)) + } + + // tests for sql parsing + tests = []map[string]interface{}{ + { + "search": "username in ('ooo.openshift')", + "sql": "username IN (?)", + "values": ConsistOf("ooo.openshift"), + }, + } + for _, test := range tests { + var list []api.Cluster + search := test["search"].(string) + sqlReal := test["sql"].(string) + valuesReal := test["values"].(types.GomegaMatcher) + listCtx, _, serviceErr := genericService.newListContext(context.Background(), "", &ListArguments{Search: search}, &list) + Expect(serviceErr).ToNot(HaveOccurred()) + tslTree, err := tsl.ParseTSL(search) + Expect(err).ToNot(HaveOccurred()) + _, sqlizer, serviceErr := genericService.treeWalkForSqlizer(listCtx, tslTree) + Expect(serviceErr).ToNot(HaveOccurred()) + sql, values, err := sqlizer.ToSql() + Expect(err).ToNot(HaveOccurred()) + Expect(sql).To(Equal(sqlReal)) + Expect(values).To(valuesReal) + } +} diff --git a/pkg/services/node_pool.go b/pkg/services/node_pool.go new file mode 100644 index 0000000..63ee330 --- /dev/null +++ b/pkg/services/node_pool.go @@ -0,0 +1,220 @@ +package services + +import ( + "context" + "encoding/json" + "time" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/dao" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/logger" +) + +type NodePoolService interface { + Get(ctx context.Context, id string) (*api.NodePool, *errors.ServiceError) + Create(ctx context.Context, nodePool *api.NodePool) (*api.NodePool, *errors.ServiceError) + Replace(ctx context.Context, nodePool *api.NodePool) (*api.NodePool, *errors.ServiceError) + Delete(ctx context.Context, id string) *errors.ServiceError + All(ctx context.Context) (api.NodePoolList, *errors.ServiceError) + + FindByIDs(ctx context.Context, ids []string) (api.NodePoolList, *errors.ServiceError) + + // Status aggregation + UpdateNodePoolStatusFromAdapters(ctx context.Context, nodePoolID string) (*api.NodePool, *errors.ServiceError) + + // idempotent functions for the control plane, but can also be called synchronously by any actor + OnUpsert(ctx context.Context, id string) error + OnDelete(ctx context.Context, id string) error +} + +func NewNodePoolService(nodePoolDao dao.NodePoolDao, adapterStatusDao dao.AdapterStatusDao) NodePoolService { + return &sqlNodePoolService{ + nodePoolDao: nodePoolDao, + adapterStatusDao: adapterStatusDao, + } +} + +var _ NodePoolService = &sqlNodePoolService{} + +type sqlNodePoolService struct { + nodePoolDao dao.NodePoolDao + adapterStatusDao dao.AdapterStatusDao +} + +func (s *sqlNodePoolService) Get(ctx context.Context, id string) (*api.NodePool, *errors.ServiceError) { + nodePool, err := s.nodePoolDao.Get(ctx, id) + if err != nil { + return nil, handleGetError("NodePool", "id", id, err) + } + return nodePool, nil +} + +func (s *sqlNodePoolService) Create(ctx context.Context, nodePool *api.NodePool) (*api.NodePool, *errors.ServiceError) { + nodePool, err := s.nodePoolDao.Create(ctx, nodePool) + if err != nil { + return nil, handleCreateError("NodePool", err) + } + + // REMOVED: Event creation - no event-driven components + return nodePool, nil +} + +func (s *sqlNodePoolService) Replace(ctx context.Context, nodePool *api.NodePool) (*api.NodePool, *errors.ServiceError) { + nodePool, err := s.nodePoolDao.Replace(ctx, nodePool) + if err != nil { + return nil, handleUpdateError("NodePool", err) + } + + // REMOVED: Event creation - no event-driven components + return nodePool, nil +} + +func (s *sqlNodePoolService) Delete(ctx context.Context, id string) *errors.ServiceError { + if err := s.nodePoolDao.Delete(ctx, id); err != nil { + return handleDeleteError("NodePool", errors.GeneralError("Unable to delete nodePool: %s", err)) + } + + // REMOVED: Event creation - no event-driven components + return nil +} + +func (s *sqlNodePoolService) FindByIDs(ctx context.Context, ids []string) (api.NodePoolList, *errors.ServiceError) { + nodePools, err := s.nodePoolDao.FindByIDs(ctx, ids) + if err != nil { + return nil, errors.GeneralError("Unable to get all nodePools: %s", err) + } + return nodePools, nil +} + +func (s *sqlNodePoolService) All(ctx context.Context) (api.NodePoolList, *errors.ServiceError) { + nodePools, err := s.nodePoolDao.All(ctx) + if err != nil { + return nil, errors.GeneralError("Unable to get all nodePools: %s", err) + } + return nodePools, nil +} + +func (s *sqlNodePoolService) OnUpsert(ctx context.Context, id string) error { + logger := logger.NewOCMLogger(ctx) + + nodePool, err := s.nodePoolDao.Get(ctx, id) + if err != nil { + return err + } + + logger.Infof("Do idempotent somethings with this nodePool: %s", nodePool.ID) + + return nil +} + +func (s *sqlNodePoolService) OnDelete(ctx context.Context, id string) error { + logger := logger.NewOCMLogger(ctx) + logger.Infof("This nodePool has been deleted: %s", id) + return nil +} + +// UpdateNodePoolStatusFromAdapters aggregates adapter statuses into nodepool status +func (s *sqlNodePoolService) UpdateNodePoolStatusFromAdapters(ctx context.Context, nodePoolID string) (*api.NodePool, *errors.ServiceError) { + // Get the nodepool + nodePool, err := s.nodePoolDao.Get(ctx, nodePoolID) + if err != nil { + return nil, handleGetError("NodePool", "id", nodePoolID, err) + } + + // Get all adapter statuses for this nodepool + adapterStatuses, err := s.adapterStatusDao.FindByResource(ctx, "NodePool", nodePoolID) + if err != nil { + return nil, errors.GeneralError("Failed to get adapter statuses: %s", err) + } + + // Build the list of ConditionAvailable + adapters := []openapi.ConditionAvailable{} + allReady := true + anyFailed := false + maxObservedGeneration := int32(0) + + for _, adapterStatus := range adapterStatuses { + // Unmarshal Conditions from JSONB + var conditions []openapi.Condition + if err := json.Unmarshal(adapterStatus.Conditions, &conditions); err != nil { + continue // Skip if can't unmarshal + } + + // Find the "Available" condition + var availableCondition *openapi.Condition + for i := range conditions { + if conditions[i].Type == "Available" { + availableCondition = &conditions[i] + break + } + } + + if availableCondition == nil { + // No Available condition means adapter is not ready + allReady = false + continue + } + + // Convert to ConditionAvailable + condAvail := openapi.ConditionAvailable{ + Type: availableCondition.Type, + Adapter: adapterStatus.Adapter, + Status: availableCondition.Status, + Reason: availableCondition.Reason, + Message: availableCondition.Message, + ObservedGeneration: availableCondition.ObservedGeneration, + } + adapters = append(adapters, condAvail) + + // Check status + if availableCondition.Status != "True" { + allReady = false + if availableCondition.Status == "False" { + anyFailed = true + } + } + + // Track max observed generation + if adapterStatus.ObservedGeneration > maxObservedGeneration { + maxObservedGeneration = adapterStatus.ObservedGeneration + } + } + + // Compute overall phase + phase := "NotReady" + if len(adapterStatuses) > 0 { + if allReady { + phase = "Ready" + } else if anyFailed { + phase = "Failed" + } + } + + // Update nodepool status fields + now := time.Now() + nodePool.StatusPhase = phase + nodePool.StatusObservedGeneration = maxObservedGeneration + + // Marshal adapters to JSON + adaptersJSON, err := json.Marshal(adapters) + if err != nil { + return nil, errors.GeneralError("Failed to marshal adapters: %s", err) + } + nodePool.StatusAdapters = adaptersJSON + nodePool.StatusUpdatedAt = &now + + // Update last transition time if phase changed + if nodePool.StatusLastTransitionTime == nil || nodePool.StatusPhase != phase { + nodePool.StatusLastTransitionTime = &now + } + + // Save the updated nodepool + nodePool, err = s.nodePoolDao.Replace(ctx, nodePool) + if err != nil { + return nil, handleUpdateError("NodePool", err) + } + + return nodePool, nil +} diff --git a/pkg/services/types.go b/pkg/services/types.go new file mode 100755 index 0000000..b86894c --- /dev/null +++ b/pkg/services/types.go @@ -0,0 +1,70 @@ +package services + +import ( + "net/url" + "strconv" + "strings" +) + +// ListArguments are arguments relevant for listing objects. +// This struct is common to all service List funcs in this package +type ListArguments struct { + Page int + Size int64 + Preloads []string + Search string + OrderBy []string + Fields []string +} + +// ~65500 is the maximum number of parameters that can be provided to a postgres WHERE IN clause +// Use it as a sane max +const MaxListSize = 65500 + +// NewListArguments Create ListArguments from url query parameters with sane defaults +func NewListArguments(params url.Values) *ListArguments { + listArgs := &ListArguments{ + Page: 1, + Size: 100, + Search: "", + } + if v := strings.Trim(params.Get("page"), " "); v != "" { + listArgs.Page, _ = strconv.Atoi(v) + } + // Support both "size" (legacy) and "pageSize" (OpenAPI spec) + if v := strings.Trim(params.Get("pageSize"), " "); v != "" { + listArgs.Size, _ = strconv.ParseInt(v, 10, 0) + } else if v := strings.Trim(params.Get("size"), " "); v != "" { + listArgs.Size, _ = strconv.ParseInt(v, 10, 0) + } + if listArgs.Size > MaxListSize || listArgs.Size < 0 { + // MaxListSize is the maximum number of *parameters* that can be provided to a postgres WHERE IN clause + // Use it as a sane max + listArgs.Size = MaxListSize + } + if v := strings.Trim(params.Get("search"), " "); v != "" { + listArgs.Search = v + } + if v := strings.Trim(params.Get("orderBy"), " "); v != "" { + listArgs.OrderBy = strings.Split(v, ",") + } + if v := strings.Trim(params.Get("fields"), " "); v != "" { + fields := strings.Split(v, ",") + idNotPresent := true + for i := 0; i < len(fields); i++ { + field := strings.Trim(fields[i], " ") + if field == "" { // skip leading/trailing commas and spaces + continue + } + if field == "id" { + idNotPresent = false + } + listArgs.Fields = append(listArgs.Fields, field) + } + if idNotPresent { + listArgs.Fields = append(listArgs.Fields, "id") + } + } + + return listArgs +} diff --git a/pkg/services/util.go b/pkg/services/util.go new file mode 100755 index 0000000..0e2eb00 --- /dev/null +++ b/pkg/services/util.go @@ -0,0 +1,51 @@ +package services + +import ( + e "errors" + "strings" + + "gorm.io/gorm" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/errors" +) + +// Field names suspected to contain personally identifiable information +var piiFields = []string{ + "username", + "first_name", + "last_name", + "email", + "address", +} + +func handleGetError(resourceType, field string, value interface{}, err error) *errors.ServiceError { + // Sanitize errors of any personally identifiable information + for _, f := range piiFields { + if field == f { + value = "" + break + } + } + if e.Is(err, gorm.ErrRecordNotFound) { + return errors.NotFound("%s with %s='%v' not found", resourceType, field, value) + } + return errors.GeneralError("Unable to find %s with %s='%v': %s", resourceType, field, value, err) +} + +func handleCreateError(resourceType string, err error) *errors.ServiceError { + if strings.Contains(err.Error(), "violates unique constraint") { + return errors.Conflict("This %s already exists", resourceType) + } + return errors.GeneralError("Unable to create %s: %s", resourceType, err.Error()) +} + +func handleUpdateError(resourceType string, err error) *errors.ServiceError { + if strings.Contains(err.Error(), "violates unique constraint") { + return errors.Conflict("Changes to %s conflict with existing records", resourceType) + } + return errors.GeneralError("Unable to update %s: %s", resourceType, err.Error()) +} + +func handleDeleteError(resourceType string, err error) *errors.ServiceError { + return errors.GeneralError("Unable to delete %s: %s", resourceType, err.Error()) +} diff --git a/pkg/util/utils.go b/pkg/util/utils.go new file mode 100755 index 0000000..1d261bc --- /dev/null +++ b/pkg/util/utils.go @@ -0,0 +1,56 @@ +package util + +import ( + "context" + "fmt" +) + +// ToPtr returns a pointer copy of value. +func ToPtr[T any](v T) *T { + return &v +} + +// FromPtr returns the pointer value or empty. +func FromPtr[T any](v *T) T { + if v == nil { + return Empty[T]() + } + return *v +} + +// FromEmptyPtr emulates ToPtr(FromPtr(x)) sequence +func FromEmptyPtr[T any](v *T) *T { + if v == nil { + x := Empty[T]() + return &x + } + return v +} + +// Empty returns an empty value of type T. +func Empty[T any]() T { + var zero T + return zero +} + +func EmptyStringToNil(a string) *string { + if a == "" { + return nil + } + return &a +} + +func NilToEmptyString(a *string) string { + if a == nil { + return "" + } + return *a +} + +func GetAccountIDFromContext(ctx context.Context) string { + accountID := ctx.Value("accountID") + if accountID == nil { + return "" + } + return fmt.Sprintf("%v", accountID) +} diff --git a/plugins/adapterStatus/plugin.go b/plugins/adapterStatus/plugin.go new file mode 100644 index 0000000..199777a --- /dev/null +++ b/plugins/adapterStatus/plugin.go @@ -0,0 +1,38 @@ +package adapterStatus + +import ( + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments" + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments/registry" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/dao" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/services" +) + +// ServiceLocator Service Locator +type ServiceLocator func() services.AdapterStatusService + +func NewServiceLocator(env *environments.Env) ServiceLocator { + return func() services.AdapterStatusService { + return services.NewAdapterStatusService( + dao.NewAdapterStatusDao(&env.Database.SessionFactory), + ) + } +} + +// Service helper function to get the adapter status service from the registry +func Service(s *environments.Services) services.AdapterStatusService { + if s == nil { + return nil + } + if obj := s.GetService("AdapterStatus"); obj != nil { + locator := obj.(ServiceLocator) + return locator() + } + return nil +} + +func init() { + // Service registration + registry.RegisterService("AdapterStatus", func(env interface{}) interface{} { + return NewServiceLocator(env.(*environments.Env)) + }) +} diff --git a/plugins/clusters/plugin.go b/plugins/clusters/plugin.go new file mode 100644 index 0000000..cf1ea41 --- /dev/null +++ b/plugins/clusters/plugin.go @@ -0,0 +1,96 @@ +package clusters + +import ( + "net/http" + + "github.com/gorilla/mux" + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments" + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments/registry" + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/server" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/presenters" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/auth" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/dao" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/handlers" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/services" + "github.com/openshift-hyperfleet/hyperfleet-api/plugins/adapterStatus" + "github.com/openshift-hyperfleet/hyperfleet-api/plugins/generic" + "github.com/openshift-hyperfleet/hyperfleet-api/plugins/nodePools" +) + +// ServiceLocator Service Locator +type ServiceLocator func() services.ClusterService + +func NewServiceLocator(env *environments.Env) ServiceLocator { + return func() services.ClusterService { + return services.NewClusterService( + dao.NewClusterDao(&env.Database.SessionFactory), + dao.NewAdapterStatusDao(&env.Database.SessionFactory), + ) + } +} + +// Service helper function to get the cluster service from the registry +func Service(s *environments.Services) services.ClusterService { + if s == nil { + return nil + } + if obj := s.GetService("Clusters"); obj != nil { + locator := obj.(ServiceLocator) + return locator() + } + return nil +} + + +func init() { + // Service registration + registry.RegisterService("Clusters", func(env interface{}) interface{} { + return NewServiceLocator(env.(*environments.Env)) + }) + + // Routes registration + server.RegisterRoutes("clusters", func(apiV1Router *mux.Router, services server.ServicesInterface, authMiddleware auth.JWTMiddleware, authzMiddleware auth.AuthorizationMiddleware) { + envServices := services.(*environments.Services) + clusterHandler := handlers.NewClusterHandler(Service(envServices), generic.Service(envServices)) + + clustersRouter := apiV1Router.PathPrefix("/clusters").Subrouter() + clustersRouter.HandleFunc("", clusterHandler.List).Methods(http.MethodGet) + clustersRouter.HandleFunc("/{id}", clusterHandler.Get).Methods(http.MethodGet) + clustersRouter.HandleFunc("", clusterHandler.Create).Methods(http.MethodPost) + clustersRouter.HandleFunc("/{id}", clusterHandler.Patch).Methods(http.MethodPatch) + clustersRouter.HandleFunc("/{id}", clusterHandler.Delete).Methods(http.MethodDelete) + + // Nested resource: cluster statuses + clusterStatusHandler := handlers.NewClusterStatusHandler(adapterStatus.Service(envServices), Service(envServices)) + clustersRouter.HandleFunc("/{id}/statuses", clusterStatusHandler.List).Methods(http.MethodGet) + clustersRouter.HandleFunc("/{id}/statuses", clusterStatusHandler.Create).Methods(http.MethodPost) + + // Nested resource: cluster nodepools + clusterNodePoolsHandler := handlers.NewClusterNodePoolsHandler( + Service(envServices), + nodePools.Service(envServices), + generic.Service(envServices), + ) + clustersRouter.HandleFunc("/{id}/nodepools", clusterNodePoolsHandler.List).Methods(http.MethodGet) + clustersRouter.HandleFunc("/{id}/nodepools", clusterNodePoolsHandler.Create).Methods(http.MethodPost) + clustersRouter.HandleFunc("/{id}/nodepools/{nodepool_id}", clusterNodePoolsHandler.Get).Methods(http.MethodGet) + + // Nested resource: nodepool statuses + nodepoolStatusHandler := handlers.NewNodePoolStatusHandler(adapterStatus.Service(envServices), nodePools.Service(envServices)) + clustersRouter.HandleFunc("/{id}/nodepools/{nodepool_id}/statuses", nodepoolStatusHandler.List).Methods(http.MethodGet) + clustersRouter.HandleFunc("/{id}/nodepools/{nodepool_id}/statuses", nodepoolStatusHandler.Create).Methods(http.MethodPost) + + clustersRouter.Use(authMiddleware.AuthenticateAccountJWT) + clustersRouter.Use(authzMiddleware.AuthorizeApi) + }) + + // REMOVED: Controller registration - Sentinel handles orchestration + // Controllers are no longer run inside the API service + + // Presenter registration + presenters.RegisterPath(api.Cluster{}, "clusters") + presenters.RegisterPath(&api.Cluster{}, "clusters") + presenters.RegisterKind(api.Cluster{}, "Cluster") + presenters.RegisterKind(&api.Cluster{}, "Cluster") +} diff --git a/plugins/generic/plugin.go b/plugins/generic/plugin.go new file mode 100755 index 0000000..43b1b78 --- /dev/null +++ b/plugins/generic/plugin.go @@ -0,0 +1,36 @@ +package generic + +import ( + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments" + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments/registry" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/dao" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/services" +) + +// ServiceLocator Service Locator +type ServiceLocator func() services.GenericService + +func NewServiceLocator(env *environments.Env) ServiceLocator { + return func() services.GenericService { + return services.NewGenericService(dao.NewGenericDao(&env.Database.SessionFactory)) + } +} + +// Service helper function to get the generic service from the registry +func Service(s *environments.Services) services.GenericService { + if s == nil { + return nil + } + if obj := s.GetService("Generic"); obj != nil { + locator := obj.(ServiceLocator) + return locator() + } + return nil +} + +func init() { + // Service registration + registry.RegisterService("Generic", func(env interface{}) interface{} { + return NewServiceLocator(env.(*environments.Env)) + }) +} diff --git a/plugins/nodePools/plugin.go b/plugins/nodePools/plugin.go new file mode 100644 index 0000000..e82aa00 --- /dev/null +++ b/plugins/nodePools/plugin.go @@ -0,0 +1,72 @@ +package nodePools + +import ( + "net/http" + + "github.com/gorilla/mux" + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments" + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments/registry" + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/server" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/presenters" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/auth" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/dao" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/handlers" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/services" + "github.com/openshift-hyperfleet/hyperfleet-api/plugins/generic" +) + +// ServiceLocator Service Locator +type ServiceLocator func() services.NodePoolService + +func NewServiceLocator(env *environments.Env) ServiceLocator { + return func() services.NodePoolService { + return services.NewNodePoolService( + dao.NewNodePoolDao(&env.Database.SessionFactory), + dao.NewAdapterStatusDao(&env.Database.SessionFactory), + ) + } +} + +// Service helper function to get the nodePool service from the registry +func Service(s *environments.Services) services.NodePoolService { + if s == nil { + return nil + } + if obj := s.GetService("NodePools"); obj != nil { + locator := obj.(ServiceLocator) + return locator() + } + return nil +} + + +func init() { + // Service registration + registry.RegisterService("NodePools", func(env interface{}) interface{} { + return NewServiceLocator(env.(*environments.Env)) + }) + + // Routes registration + server.RegisterRoutes("nodePools", func(apiV1Router *mux.Router, services server.ServicesInterface, authMiddleware auth.JWTMiddleware, authzMiddleware auth.AuthorizationMiddleware) { + envServices := services.(*environments.Services) + nodePoolHandler := handlers.NewNodePoolHandler(Service(envServices), generic.Service(envServices)) + + // Only register routes that are in the OpenAPI spec + // GET /api/hyperfleet/v1/nodepools - List all nodepools + nodePoolsRouter := apiV1Router.PathPrefix("/nodepools").Subrouter() + nodePoolsRouter.HandleFunc("", nodePoolHandler.List).Methods(http.MethodGet) + + nodePoolsRouter.Use(authMiddleware.AuthenticateAccountJWT) + nodePoolsRouter.Use(authzMiddleware.AuthorizeApi) + }) + + // REMOVED: Controller registration - Sentinel handles orchestration + // Controllers are no longer run inside the API service + + // Presenter registration + presenters.RegisterPath(api.NodePool{}, "node_pools") + presenters.RegisterPath(&api.NodePool{}, "node_pools") + presenters.RegisterKind(api.NodePool{}, "NodePool") + presenters.RegisterKind(&api.NodePool{}, "NodePool") +} diff --git a/serve b/serve new file mode 100755 index 0000000..d3f0474 Binary files /dev/null and b/serve differ diff --git a/test/factories/clusters.go b/test/factories/clusters.go new file mode 100644 index 0000000..9c483d1 --- /dev/null +++ b/test/factories/clusters.go @@ -0,0 +1,48 @@ +package factories + +import ( + "context" + + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/plugins/clusters" +) + +func (f *Factories) NewCluster(id string) (*api.Cluster, error) { + clusterService := clusters.Service(&environments.Environment().Services) + + cluster := &api.Cluster{ + Meta: api.Meta{ID: id}, + Name: "test-cluster-" + id, // Use unique name based on ID + Spec: []byte(`{"test": "spec"}`), + Generation: 42, + } + + sub, err := clusterService.Create(context.Background(), cluster) + if err != nil { + return nil, err + } + + return sub, nil +} + +func (f *Factories) NewClusterList(name string, count int) ([]*api.Cluster, error) { + var Clusters []*api.Cluster + for i := 1; i <= count; i++ { + c, err := f.NewCluster(f.NewID()) + if err != nil { + return nil, err + } + Clusters = append(Clusters, c) + } + return Clusters, nil +} + +// Aliases for test compatibility +func (f *Factories) NewClusters(id string) (*api.Cluster, error) { + return f.NewCluster(id) +} + +func (f *Factories) NewClustersList(name string, count int) ([]*api.Cluster, error) { + return f.NewClusterList(name, count) +} diff --git a/test/factories/factory.go b/test/factories/factory.go new file mode 100755 index 0000000..f5551e3 --- /dev/null +++ b/test/factories/factory.go @@ -0,0 +1,10 @@ +package factories + +import "github.com/segmentio/ksuid" + +type Factories struct { +} + +func (f *Factories) NewID() string { + return ksuid.New().String() +} diff --git a/test/factories/node_pools.go b/test/factories/node_pools.go new file mode 100644 index 0000000..4824ade --- /dev/null +++ b/test/factories/node_pools.go @@ -0,0 +1,68 @@ +package factories + +import ( + "context" + "fmt" + + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/plugins/nodePools" +) + +func (f *Factories) NewNodePool(id string) (*api.NodePool, error) { + nodePoolService := nodePools.Service(&environments.Environment().Services) + + if nodePoolService == nil { + return nil, fmt.Errorf("nodePoolService is nil - service not initialized") + } + + // Create a parent cluster first to get a valid OwnerID + cluster, err := f.NewCluster(f.NewID()) + if err != nil { + return nil, fmt.Errorf("failed to create parent cluster: %w", err) + } + + if cluster == nil { + return nil, fmt.Errorf("cluster is nil after NewCluster call") + } + + nodePool := &api.NodePool{ + Meta: api.Meta{ID: id}, + Name: "test-nodepool-" + id, // Use unique name based on ID + Spec: []byte(`{"test": "spec"}`), + OwnerID: cluster.ID, // Use real cluster ID + } + + sub, serviceErr := nodePoolService.Create(context.Background(), nodePool) + // Check for real errors (not typed nil) + if serviceErr != nil && serviceErr.Code != 0 { + return nil, fmt.Errorf("failed to create nodepool: %s (code: %d)", serviceErr.Reason, serviceErr.Code) + } + + if sub == nil { + return nil, fmt.Errorf("nodePoolService.Create returned nil without error") + } + + return sub, nil +} + +func (f *Factories) NewNodePoolList(name string, count int) ([]*api.NodePool, error) { + var NodePools []*api.NodePool + for i := 1; i <= count; i++ { + c, err := f.NewNodePool(f.NewID()) + if err != nil { + return nil, err + } + NodePools = append(NodePools, c) + } + return NodePools, nil +} + +// Aliases for test compatibility +func (f *Factories) NewNodePools(id string) (*api.NodePool, error) { + return f.NewNodePool(id) +} + +func (f *Factories) NewNodePoolsList(name string, count int) ([]*api.NodePool, error) { + return f.NewNodePoolList(name, count) +} diff --git a/test/helper.go b/test/helper.go new file mode 100755 index 0000000..522fd52 --- /dev/null +++ b/test/helper.go @@ -0,0 +1,508 @@ +package test + +import ( + "context" + "crypto/rsa" + "encoding/base64" + "encoding/json" + "fmt" + "os" + "strings" + "sync" + "testing" + "time" + + "github.com/bxcodec/faker/v3" + "github.com/golang-jwt/jwt/v4" + "github.com/golang/glog" + "github.com/google/uuid" + "github.com/segmentio/ksuid" + "github.com/spf13/pflag" + + amv1 "github.com/openshift-online/ocm-sdk-go/accountsmgmt/v1" + + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/environments" + "github.com/openshift-hyperfleet/hyperfleet-api/cmd/hyperfleet-api/server" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/config" + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/db" + "github.com/openshift-hyperfleet/hyperfleet-api/test/factories" + "github.com/openshift-hyperfleet/hyperfleet-api/test/mocks" +) + +const ( + apiPort = ":8777" + jwtKeyFile = "test/support/jwt_private_key.pem" + jwtCAFile = "test/support/jwt_ca.pem" + jwkKID = "uhctestkey" + jwkAlg = "RS256" +) + +var ( + helper *Helper + once sync.Once +) + +// jwkURL stores the JWK mock server URL for testing +var jwkURL string + +// TimeFunc defines a way to get a new Time instance common to the entire test suite. +// Aria's environment has Virtual Time that may not be actual time. We compensate +// by synchronizing on a common time func attached to the test harness. +type TimeFunc func() time.Time + +type Helper struct { + Ctx context.Context + DBFactory db.SessionFactory + AppConfig *config.ApplicationConfig + APIServer server.Server + MetricsServer server.Server + HealthCheckServer server.Server + TimeFunc TimeFunc + JWTPrivateKey *rsa.PrivateKey + JWTCA *rsa.PublicKey + T *testing.T + teardowns []func() error + Factories factories.Factories +} + +func NewHelper(t *testing.T) *Helper { + once.Do(func() { + jwtKey, jwtCA, err := parseJWTKeys() + if err != nil { + fmt.Println("Unable to read JWT keys - this may affect tests that make authenticated server requests") + } + + env := environments.Environment() + err = env.AddFlags(pflag.CommandLine) + if err != nil { + glog.Fatalf("Unable to add environment flags: %s", err.Error()) + } + if logLevel := os.Getenv("LOGLEVEL"); logLevel != "" { + glog.Infof("Using custom loglevel: %s", logLevel) + pflag.CommandLine.Set("-v", logLevel) + } + pflag.Parse() + + err = env.Initialize() + if err != nil { + glog.Fatalf("Unable to initialize testing environment: %s", err.Error()) + } + + helper = &Helper{ + AppConfig: environments.Environment().Config, + DBFactory: environments.Environment().Database.SessionFactory, + JWTPrivateKey: jwtKey, + JWTCA: jwtCA, + } + + // Start JWK certificate mock server for testing + jwkMockTeardown := helper.StartJWKCertServerMock() + helper.teardowns = []func() error{ + helper.CleanDB, + jwkMockTeardown, + helper.stopAPIServer, + helper.teardownEnv, + } + helper.startAPIServer() + helper.startMetricsServer() + helper.startHealthCheckServer() + }) + helper.T = t + return helper +} + +func (helper *Helper) Env() *environments.Env { + return environments.Environment() +} + +func (helper *Helper) teardownEnv() error { + helper.Env().Teardown() + return nil +} + +func (helper *Helper) Teardown() { + for _, f := range helper.teardowns { + err := f() + if err != nil { + helper.T.Errorf("error running teardown func: %s", err) + } + } +} + +func (helper *Helper) startAPIServer() { + // Configure JWK certificate URL for API server + helper.Env().Config.Server.JwkCertURL = jwkURL + helper.APIServer = server.NewAPIServer() + listener, err := helper.APIServer.Listen() + if err != nil { + glog.Fatalf("Unable to start Test API server: %s", err) + } + go func() { + glog.V(10).Info("Test API server started") + helper.APIServer.Serve(listener) + glog.V(10).Info("Test API server stopped") + }() +} + +func (helper *Helper) stopAPIServer() error { + if err := helper.APIServer.Stop(); err != nil { + return fmt.Errorf("unable to stop api server: %s", err.Error()) + } + return nil +} + +func (helper *Helper) startMetricsServer() { + helper.MetricsServer = server.NewMetricsServer() + go func() { + glog.V(10).Info("Test Metrics server started") + helper.MetricsServer.Start() + glog.V(10).Info("Test Metrics server stopped") + }() +} + +func (helper *Helper) stopMetricsServer() { + if err := helper.MetricsServer.Stop(); err != nil { + glog.Fatalf("Unable to stop metrics server: %s", err.Error()) + } +} + +func (helper *Helper) startHealthCheckServer() { + helper.HealthCheckServer = server.NewHealthCheckServer() + go func() { + glog.V(10).Info("Test health check server started") + helper.HealthCheckServer.Start() + glog.V(10).Info("Test health check server stopped") + }() +} + +func (helper *Helper) RestartServer() { + helper.stopAPIServer() + helper.startAPIServer() + glog.V(10).Info("Test API server restarted") +} + +func (helper *Helper) RestartMetricsServer() { + helper.stopMetricsServer() + helper.startMetricsServer() + glog.V(10).Info("Test metrics server restarted") +} + +func (helper *Helper) Reset() { + glog.Infof("Reseting testing environment") + env := environments.Environment() + // Reset the configuration + env.Config = config.NewApplicationConfig() + + // Re-read command-line configuration into a NEW flagset + // This new flag set ensures we don't hit conflicts defining the same flag twice + // Also on reset, we don't care to be re-defining 'v' and other glog flags + flagset := pflag.NewFlagSet(helper.NewID(), pflag.ContinueOnError) + env.AddFlags(flagset) + pflag.Parse() + + err := env.Initialize() + if err != nil { + glog.Fatalf("Unable to reset testing environment: %s", err.Error()) + } + helper.AppConfig = env.Config + helper.RestartServer() +} + +// NewID creates a new unique ID used internally to CS +func (helper *Helper) NewID() string { + return ksuid.New().String() +} + +// NewUUID creates a new unique UUID, which has different formatting than ksuid +// UUID is used by telemeter and we validate the format. +func (helper *Helper) NewUUID() string { + return uuid.New().String() +} + +func (helper *Helper) RestURL(path string) string { + protocol := "http" + if helper.AppConfig.Server.EnableHTTPS { + protocol = "https" + } + return fmt.Sprintf("%s://%s/api/hyperfleet/v1%s", protocol, helper.AppConfig.Server.BindAddress, path) +} + +func (helper *Helper) MetricsURL(path string) string { + return fmt.Sprintf("http://%s%s", helper.AppConfig.Metrics.BindAddress, path) +} + +func (helper *Helper) HealthCheckURL(path string) string { + return fmt.Sprintf("http://%s%s", helper.AppConfig.HealthCheck.BindAddress, path) +} + +func (helper *Helper) NewApiClient() *openapi.APIClient { + config := openapi.NewConfiguration() + client := openapi.NewAPIClient(config) + return client +} + +func (helper *Helper) NewRandAccount() *amv1.Account { + return helper.NewAccount(helper.NewID(), faker.Name(), faker.Email()) +} + +func (helper *Helper) NewAccount(username, name, email string) *amv1.Account { + var firstName string + var lastName string + names := strings.SplitN(name, " ", 2) + if len(names) < 2 { + firstName = name + lastName = "" + } else { + firstName = names[0] + lastName = names[1] + } + + builder := amv1.NewAccount(). + Username(username). + FirstName(firstName). + LastName(lastName). + Email(email) + + acct, err := builder.Build() + if err != nil { + helper.T.Errorf("Unable to build account: %s", err) + } + return acct +} + +func (helper *Helper) NewAuthenticatedContext(account *amv1.Account) context.Context { + tokenString := helper.CreateJWTString(account) + return context.WithValue(context.Background(), openapi.ContextAccessToken, tokenString) +} + +func (helper *Helper) StartJWKCertServerMock() (teardown func() error) { + jwkURL, teardown = mocks.NewJWKCertServerMock(helper.T, helper.JWTCA, jwkKID, jwkAlg) + helper.Env().Config.Server.JwkCertURL = jwkURL + return teardown +} + +func (helper *Helper) DeleteAll(table interface{}) { + g2 := helper.DBFactory.New(context.Background()) + err := g2.Model(table).Unscoped().Delete(table).Error + if err != nil { + helper.T.Errorf("error deleting from table %v: %v", table, err) + } +} + +func (helper *Helper) Delete(obj interface{}) { + g2 := helper.DBFactory.New(context.Background()) + err := g2.Unscoped().Delete(obj).Error + if err != nil { + helper.T.Errorf("error deleting object %v: %v", obj, err) + } +} + +func (helper *Helper) SkipIfShort() { + if testing.Short() { + helper.T.Skip("Skipping execution of test in short mode") + } +} + +func (helper *Helper) Count(table string) int64 { + g2 := helper.DBFactory.New(context.Background()) + var count int64 + err := g2.Table(table).Count(&count).Error + if err != nil { + helper.T.Errorf("error getting count for table %s: %v", table, err) + } + return count +} + +func (helper *Helper) MigrateDB() error { + return db.Migrate(helper.DBFactory.New(context.Background())) +} + +func (helper *Helper) MigrateDBTo(migrationID string) { + db.MigrateTo(helper.DBFactory, migrationID) +} + +func (helper *Helper) ClearAllTables() { + // Reserved for future use +} + +func (helper *Helper) CleanDB() error { + g2 := helper.DBFactory.New(context.Background()) + + // TODO: this list should not be static or otherwise not hard-coded here. + for _, table := range []string{ + // REMOVED: "events" - Events table no longer used - no event-driven components + "migrations", + } { + if g2.Migrator().HasTable(table) { + if err := g2.Migrator().DropTable(table); err != nil { + helper.T.Errorf("error dropping table %s: %v", table, err) + return err + } + } + // No error if table doesn't exist - it may not have been created yet + } + return nil +} + +func (helper *Helper) ResetDB() error { + if err := helper.CleanDB(); err != nil { + return err + } + + if err := helper.MigrateDB(); err != nil { + return err + } + + return nil +} + +func (helper *Helper) CreateJWTString(account *amv1.Account) string { + // Use an RH SSO JWT by default since we are phasing RHD out + claims := jwt.MapClaims{ + "iss": helper.Env().Config.OCM.TokenURL, + "username": strings.ToLower(account.Username()), + "first_name": account.FirstName(), + "last_name": account.LastName(), + "typ": "Bearer", + "iat": time.Now().Unix(), + "exp": time.Now().Add(1 * time.Hour).Unix(), + } + if account.Email() != "" { + claims["email"] = account.Email() + } + /* TODO the ocm api model needs to be updated to expose this + if account.ServiceAccount { + claims["clientId"] = account.Username() + } + */ + + token := jwt.NewWithClaims(jwt.SigningMethodRS256, claims) + // Set the token header kid to the same value we expect when validating the token + // The kid is an arbitrary identifier for the key + // See https://tools.ietf.org/html/rfc7517#section-4.5 + token.Header["kid"] = jwkKID + + // private key and public key taken from http://kjur.github.io/jsjws/tool_jwt.html + // the go-jwt-middleware pkg we use does the same for their tests + signedToken, err := token.SignedString(helper.JWTPrivateKey) + if err != nil { + helper.T.Errorf("Unable to sign test jwt: %s", err) + return "" + } + return signedToken +} + +func (helper *Helper) CreateJWTToken(account *amv1.Account) *jwt.Token { + tokenString := helper.CreateJWTString(account) + + token, err := jwt.Parse(tokenString, func(token *jwt.Token) (interface{}, error) { + return helper.JWTCA, nil + }) + if err != nil { + helper.T.Errorf("Unable to parse signed jwt: %s", err) + return nil + } + return token +} + +// OpenapiError Convert an error response from the openapi client to an openapi error struct +func (helper *Helper) OpenapiError(err error) openapi.Error { + generic := err.(openapi.GenericOpenAPIError) + var exErr openapi.Error + jsonErr := json.Unmarshal(generic.Body(), &exErr) + if jsonErr != nil { + helper.T.Errorf("Unable to convert error response to openapi error: %s", jsonErr) + } + return exErr +} + +func parseJWTKeys() (*rsa.PrivateKey, *rsa.PublicKey, error) { + // projectRootDir := getProjectRootDir() + // privateBytes, err := os.ReadFile(filepath.Join(projectRootDir, jwtKeyFile)) + privateBytes, err := privatebytes() + if err != nil { + err = fmt.Errorf("unable to read JWT key file %s: %s", jwtKeyFile, err) + return nil, nil, err + } + // pubBytes, err := ioutil.ReadFile(filepath.Join(projectRootDir, jwtCAFile)) + pubBytes, err := publicbytes() + if err != nil { + err = fmt.Errorf("unable to read JWT ca file %s: %s", jwtKeyFile, err) + return nil, nil, err + } + + // Parse keys + privateKey, err := jwt.ParseRSAPrivateKeyFromPEMWithPassword(privateBytes, "passwd") + if err != nil { + err = fmt.Errorf("unable to parse JWT private key: %s", err) + return nil, nil, err + } + pubKey, err := jwt.ParseRSAPublicKeyFromPEM(pubBytes) + if err != nil { + err = fmt.Errorf("unable to parse JWT ca: %s", err) + return nil, nil, err + } + + return privateKey, pubKey, nil +} + +func privatebytes() ([]byte, error) { + s := `LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpQcm9jLVR5cGU6IDQsRU5DUllQVEVECkRF +Sy1JbmZvOiBERVMtRURFMy1DQkMsMkU2NTExOEU2QzdCNTIwNwoKN2NZVVRXNFpCZG1WWjRJTEIw +OGhjVGRtNWliMEUwemN5K0k3cEhwTlFmSkh0STdCSjRvbXlzNVMxOXVmSlBCSgpJellqZU83b1RW +cUkzN0Y2RVVtalpxRzRXVkUyVVFiUURrb3NaYlpOODJPNElwdTFsRkFQRWJ3anFlUE1LdWZ6CnNu +U1FIS2ZuYnl5RFBFVk5sSmJzMTlOWEM4djZnK3BRYXk1ckgvSTZOMmlCeGdzVG11ZW1aNTRFaE5R +TVp5RU4KUi9DaWhlQXJXRUg5SDgvNGhkMmdjOVRiMnMwTXdHSElMTDRrYmJObTV0cDN4dzRpazdP +WVdOcmozbStuRzZYYgp2S1hoMnhFYW5BWkF5TVhUcURKVEhkbjcvQ0VxdXNRUEpqWkdWK01mMWtq +S3U3cDRxY1hGbklYUDVJTG5UVzdiCmxIb1dDNGV3ZUR6S09NUnpYbWJBQkVWU1V2eDJTbVBsNFRj +b0M1TDFTQ0FIRW1aYUtiYVk3UzVsNTN1NmdsMGYKVUx1UWJ0N0hyM1RIem5sTkZLa0dUMS95Vk50 +MlFPbTFlbVpkNTVMYU5lOEU3WHNOU2xobDBncllRK1VlOEpiYQp4ODVPYXBsdFZqeE05d1ZDd2Jn +RnlpMDRpaGRLSG85ZSt1WUtlVEdLdjBoVTVPN0hFSDFldjZ0L3MydS9VRzZoClRxRXNZclZwMENN +SHB0NXVBRjZuWnlLNkdaL0NIVHhoL3J6MWhBRE1vZmVtNTkrZTZ0VnRqblBHQTNFam5KVDgKQk1P +dy9EMlFJRHhqeGoyR1V6eitZSnA1MEVOaFdyTDlvU0RrRzJuenY0TlZMNzdRSXkrVC8yL2Y0UGdv +a1VETwpRSmpJZnhQV0U0MGNIR0hwblF0WnZFUG94UDBIM1QwWWhtRVZ3dUp4WDN1YVdPWS84RmEx +YzdMbjBTd1dkZlY1CmdZdkpWOG82YzNzdW1jcTFPM2FnUERsSEM1TzRJeEc3QVpROENIUkR5QVNv +Z3pma1k2UDU3OVpPR1lhTzRhbDcKV0ExWUlwc0hzMy8xZjRTQnlNdVdlME5Wa0Zmdlhja2pwcUdy +QlFwVG1xUXprNmJhYTBWUTBjd1UzWGxrd0hhYwpXQi9mUTRqeWx3RnpaRGNwNUpBbzUzbjZhVTcy +emdOdkRsR1ROS3dkWFhaSTVVM0pQb2NIMEFpWmdGRldZSkxkCjYzUEpMRG5qeUUzaTZYTVZseGlm +WEtrWFZ2MFJZU3orQnlTN096OWFDZ25RaE5VOHljditVeHRma1BRaWg1ekUKLzBZMkVFRmtuYWpt +RkpwTlhjenpGOE9FemFzd21SMEFPamNDaWtsWktSZjYxcmY1ZmFKeEpoaHFLRUVCSnVMNgpvb2RE +VlJrM09HVTF5UVNCYXpUOG5LM1YrZTZGTW8zdFdrcmEyQlhGQ0QrcEt4VHkwMTRDcDU5UzF3NkYx +Rmp0CldYN2VNV1NMV2ZRNTZqMmtMTUJIcTVnYjJhcnFscUgzZnNZT1REM1ROakNZRjNTZ3gzMDlr +VlB1T0s1dnc2MVAKcG5ML0xOM2lHWTQyV1IrOWxmQXlOTjJxajl6dndLd3NjeVlzNStEUFFvUG1j +UGNWR2Mzdi91NjZiTGNPR2JFVQpPbEdhLzZnZEQ0R0NwNUU0ZlAvN0dibkVZL1BXMmFicXVGaEdC +K3BWZGwzLzQrMVUvOGtJdGxmV05ab0c0RmhFCmdqTWQ3Z2xtcmRGaU5KRkZwZjVrczFsVlhHcUo0 +bVp4cXRFWnJ4VUV3Y2laam00VjI3YStFMkt5VjlObmtzWjYKeEY0dEdQS0lQc3ZOVFY1bzhacWpp +YWN4Z2JZbXIyeXdxRFhLQ2dwVS9SV1NoMXNMYXBxU1FxYkgvdzBNcXVVagpWaFZYMFJNWUgvZm9L +dGphZ1pmL0tPMS9tbkNJVGw4NnRyZUlkYWNoR2dSNHdyL3FxTWpycFBVYVBMQ1JZM0pRCjAwWFVQ +MU11NllQRTBTbk1ZQVZ4WmhlcUtIbHkzYTFwZzRYcDdZV2xNNjcxb1VPUnMzK1ZFTmZuYkl4Z3Ir +MkQKVGlKVDlQeHdwZks1M09oN1JCU1dISlpSdUFkTFVYRThERytibDBOL1FrSk02cEZVeFRJMUFR +PT0KLS0tLS1FTkQgUlNBIFBSSVZBVEUgS0VZLS0tLS0K` + + return base64.StdEncoding.DecodeString(s) +} + +func publicbytes() ([]byte, error) { + s := `LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUMvekNDQWVlZ0F3SUJBZ0lCQVRBTkJna3Fo +a2lHOXcwQkFRVUZBREFhTVFzd0NRWURWUVFHRXdKVlV6RUwKTUFrR0ExVUVDZ3dDV2pRd0hoY05N +VE13T0RJNE1UZ3lPRE0wV2hjTk1qTXdPREk0TVRneU9ETTBXakFhTVFzdwpDUVlEVlFRR0V3SlZV +ekVMTUFrR0ExVUVDZ3dDV2pRd2dnRWlNQTBHQ1NxR1NJYjNEUUVCQVFVQUE0SUJEd0F3CmdnRUtB +b0lCQVFEZmRPcW90SGQ1NVNZTzBkTHoyb1hlbmd3L3RaK3EzWm1PUGVWbU11T01JWU8vQ3Yxd2sy +VTAKT0s0cHVnNE9CU0pQaGwwOVpzNkl3QjhOd1BPVTdFRFRnTU9jUVVZQi82UU5DSTFKN1ptMm9M +dHVjaHp6NHBJYgorbzRaQWhWcHJMaFJ5dnFpOE9US1E3a2ZHZnM1VHV3bW4xTS8wZlFrZnpNeEFE +cGpPS05nZjB1eTZsTjZ1dGpkClRyUEtLRlVRTmRjNi9UeThFZVRuUUV3VWxzVDJMQVhDZkVLeFRu +NVJsUmxqRHp0UzdTZmdzOFZMMEZQeTFRaTgKQitkRmNnUllLRnJjcHNWYVoxbEJtWEtzWERSdTVR +Ui9SZzNmOURScTRHUjFzTkg4UkxZOXVBcE1sMlNOeitzUgo0elJQRzg1Ui9zZTVRMDZHdTBCVVEz +VVBtNjdFVFZaTEFnTUJBQUdqVURCT01CMEdBMVVkRGdRV0JCUUhaUFRFCnlRVnUvMEkvM1FXaGxU +eVc3V29UelRBZkJnTlZIU01FR0RBV2dCUUhaUFRFeVFWdS8wSS8zUVdobFR5VzdXb1QKelRBTUJn +TlZIUk1FQlRBREFRSC9NQTBHQ1NxR1NJYjNEUUVCQlFVQUE0SUJBUURIeHFKOXk4YWxUSDdhZ1ZN +VwpaZmljL1JicmR2SHd5cStJT3JnRFRvcXlvMHcrSVo2QkNuOXZqdjVpdWhxdTRGb3JPV0RBRnBR +S1pXMERMQkpFClF5LzcvMCs5cGsyRFBoSzFYemRPb3ZsU3JrUnQrR2NFcEduVVhuekFDWERCYk8w +K1dyaytoY2pFa1FSUksxYlcKMnJrbkFSSUVKRzlHUytwU2hQOUJxLzBCbU5zTWVwZE5jQmEwejNh +NUIwZnpGeUNRb1VsWDZSVHF4UncxaDFRdAo1RjAwcGZzcDdTalhWSXZZY2V3SGFOQVNidG8xbjVo +clN6MVZZOWhMYmExMWl2TDFONFdvV2JtekFMNkJXYWJzCkMyRC9NZW5TVDIvWDZoVEt5R1hwZzNF +ZzJoM2lMdlV0d2NObnkwaFJLc3RjNzNKbDl4UjNxWGZYS0pIMFRoVGwKcTBncQotLS0tLUVORCBD +RVJUSUZJQ0FURS0tLS0tCg==` + return base64.StdEncoding.DecodeString(s) +} diff --git a/test/integration/adapter_status_test.go b/test/integration/adapter_status_test.go new file mode 100644 index 0000000..f442caa --- /dev/null +++ b/test/integration/adapter_status_test.go @@ -0,0 +1,358 @@ +package integration + +import ( + "fmt" + "net/http" + "testing" + + . "github.com/onsi/gomega" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/test" +) + +// TestClusterStatusPost tests creating adapter status for a cluster +func TestClusterStatusPost(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Create a cluster first + cluster, err := h.Factories.NewClusters(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + + // Create an adapter status for the cluster + statusInput := openapi.AdapterStatusCreateRequest{ + Adapter: "test-adapter", + ObservedGeneration: cluster.Generation, + Conditions: []openapi.Condition{ + { + Type: "Ready", + Status: "True", + Reason: openapi.PtrString("AdapterReady"), + }, + }, + Data: map[string]interface{}{ + "test_key": "test_value", + }, + } + + statusOutput, resp, err := client.DefaultAPI.PostClusterStatuses(ctx, cluster.ID).AdapterStatusCreateRequest(statusInput).Execute() + Expect(err).NotTo(HaveOccurred(), "Error posting cluster status: %v", err) + Expect(resp.StatusCode).To(Equal(http.StatusCreated)) + Expect(statusOutput.Adapter).To(Equal("test-adapter")) + Expect(statusOutput.ObservedGeneration).To(Equal(cluster.Generation)) + Expect(len(statusOutput.Conditions)).To(BeNumerically(">", 0)) +} + +// TestClusterStatusGet tests retrieving adapter statuses for a cluster +func TestClusterStatusGet(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Create a cluster first + cluster, err := h.Factories.NewClusters(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + + // Create a few adapter statuses + for i := 0; i < 3; i++ { + statusInput := openapi.AdapterStatusCreateRequest{ + Adapter: fmt.Sprintf("adapter-%d", i), + ObservedGeneration: cluster.Generation, + Conditions: []openapi.Condition{ + { + Type: "Ready", + Status: "True", + }, + }, + } + _, _, err := client.DefaultAPI.PostClusterStatuses(ctx, cluster.ID).AdapterStatusCreateRequest(statusInput).Execute() + Expect(err).NotTo(HaveOccurred()) + } + + // Get all statuses for the cluster + list, resp, err := client.DefaultAPI.GetClusterStatuses(ctx, cluster.ID).Execute() + Expect(err).NotTo(HaveOccurred(), "Error getting cluster statuses: %v", err) + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + Expect(list).NotTo(BeNil()) + Expect(len(list.Items)).To(BeNumerically(">=", 3)) +} + +// TestNodePoolStatusPost tests creating adapter status for a nodepool +func TestNodePoolStatusPost(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Create a nodepool (which also creates its parent cluster) + nodePool, err := h.Factories.NewNodePools(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + Expect(nodePool).NotTo(BeNil(), "nodePool should not be nil") + Expect(nodePool.OwnerID).NotTo(BeEmpty(), "nodePool.OwnerID should not be empty") + Expect(nodePool.ID).NotTo(BeEmpty(), "nodePool.ID should not be empty") + + // Create an adapter status for the nodepool + statusInput := openapi.AdapterStatusCreateRequest{ + Adapter: "test-nodepool-adapter", + ObservedGeneration: 1, + Conditions: []openapi.Condition{ + { + Type: "Ready", + Status: "False", + Reason: openapi.PtrString("Initializing"), + }, + }, + Data: map[string]interface{}{ + "nodepool_data": "value", + }, + } + + // Use nodePool.OwnerID as the cluster_id parameter + statusOutput, resp, err := client.DefaultAPI.PostNodePoolStatuses(ctx, nodePool.OwnerID, nodePool.ID).AdapterStatusCreateRequest(statusInput).Execute() + Expect(err).NotTo(HaveOccurred(), "Error posting nodepool status: %v", err) + Expect(resp.StatusCode).To(Equal(http.StatusCreated)) + Expect(statusOutput.Adapter).To(Equal("test-nodepool-adapter")) + Expect(len(statusOutput.Conditions)).To(BeNumerically(">", 0)) +} + +// TestNodePoolStatusGet tests retrieving adapter statuses for a nodepool +func TestNodePoolStatusGet(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Create a nodepool (which also creates its parent cluster) + nodePool, err := h.Factories.NewNodePools(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + + // Create a few adapter statuses + for i := 0; i < 2; i++ { + statusInput := openapi.AdapterStatusCreateRequest{ + Adapter: fmt.Sprintf("nodepool-adapter-%d", i), + ObservedGeneration: 1, + Conditions: []openapi.Condition{ + { + Type: "Ready", + Status: "True", + }, + }, + } + // Use nodePool.OwnerID as the cluster_id parameter + _, _, err := client.DefaultAPI.PostNodePoolStatuses(ctx, nodePool.OwnerID, nodePool.ID).AdapterStatusCreateRequest(statusInput).Execute() + Expect(err).NotTo(HaveOccurred()) + } + + // Get all statuses for the nodepool + list, resp, err := client.DefaultAPI.GetNodePoolsStatuses(ctx, nodePool.OwnerID, nodePool.ID).Execute() + Expect(err).NotTo(HaveOccurred(), "Error getting nodepool statuses: %v", err) + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + Expect(list).NotTo(BeNil()) + Expect(len(list.Items)).To(BeNumerically(">=", 2)) +} + +// TestAdapterStatusPaging tests paging for adapter statuses +func TestAdapterStatusPaging(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Create a cluster + cluster, err := h.Factories.NewClusters(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + + // Create multiple statuses + for i := 0; i < 10; i++ { + statusInput := openapi.AdapterStatusCreateRequest{ + Adapter: fmt.Sprintf("adapter-%d", i), + ObservedGeneration: cluster.Generation, + Conditions: []openapi.Condition{ + { + Type: "Ready", + Status: "True", + }, + }, + } + _, _, err := client.DefaultAPI.PostClusterStatuses(ctx, cluster.ID).AdapterStatusCreateRequest(statusInput).Execute() + Expect(err).NotTo(HaveOccurred()) + } + + // Test paging + list, _, err := client.DefaultAPI.GetClusterStatuses(ctx, cluster.ID).Page(1).PageSize(5).Execute() + Expect(err).NotTo(HaveOccurred()) + Expect(len(list.Items)).To(BeNumerically("<=", 5)) + Expect(list.Page).To(Equal(int32(1))) +} + +// TestAdapterStatusIdempotency tests that posting the same adapter twice updates instead of creating duplicate +func TestAdapterStatusIdempotency(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Create a cluster + cluster, err := h.Factories.NewClusters(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + + // First POST: Create adapter status + statusInput1 := openapi.AdapterStatusCreateRequest{ + Adapter: "idempotency-test-adapter", + ObservedGeneration: cluster.Generation, + Conditions: []openapi.Condition{ + { + Type: "Ready", + Status: "False", + Reason: openapi.PtrString("Initializing"), + }, + }, + Data: map[string]interface{}{ + "version": "1.0", + }, + } + + status1, resp, err := client.DefaultAPI.PostClusterStatuses(ctx, cluster.ID).AdapterStatusCreateRequest(statusInput1).Execute() + Expect(err).NotTo(HaveOccurred()) + Expect(resp.StatusCode).To(Equal(http.StatusCreated)) + Expect(status1.Adapter).To(Equal("idempotency-test-adapter")) + Expect(status1.Conditions[0].Status).To(Equal("False")) + + // Second POST: Update the same adapter with different conditions + statusInput2 := openapi.AdapterStatusCreateRequest{ + Adapter: "idempotency-test-adapter", + ObservedGeneration: cluster.Generation, + Conditions: []openapi.Condition{ + { + Type: "Ready", + Status: "True", + Reason: openapi.PtrString("AdapterReady"), + }, + }, + Data: map[string]interface{}{ + "version": "2.0", + }, + } + + status2, resp, err := client.DefaultAPI.PostClusterStatuses(ctx, cluster.ID).AdapterStatusCreateRequest(statusInput2).Execute() + Expect(err).NotTo(HaveOccurred()) + Expect(resp.StatusCode).To(Equal(http.StatusCreated)) + Expect(status2.Adapter).To(Equal("idempotency-test-adapter")) + Expect(status2.Conditions[0].Status).To(Equal("True")) + + // GET all statuses - should have only ONE status for "idempotency-test-adapter" + list, _, err := client.DefaultAPI.GetClusterStatuses(ctx, cluster.ID).Execute() + Expect(err).NotTo(HaveOccurred()) + + // Count how many times this adapter appears + adapterCount := 0 + var finalStatus openapi.AdapterStatus + for _, s := range list.Items { + if s.Adapter == "idempotency-test-adapter" { + adapterCount++ + finalStatus = s + } + } + + // Verify: should have exactly ONE entry for this adapter (updated, not duplicated) + Expect(adapterCount).To(Equal(1), "Adapter should be updated, not duplicated") + Expect(finalStatus.Conditions[0].Status).To(Equal("True"), "Conditions should be updated to latest") +} + +// TestAdapterStatusPagingEdgeCases tests edge cases in pagination +func TestAdapterStatusPagingEdgeCases(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Create a cluster + cluster, err := h.Factories.NewClusters(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + + // Create exactly 10 statuses + for i := 0; i < 10; i++ { + statusInput := openapi.AdapterStatusCreateRequest{ + Adapter: fmt.Sprintf("edge-adapter-%d", i), + ObservedGeneration: cluster.Generation, + Conditions: []openapi.Condition{ + { + Type: "Ready", + Status: "True", + }, + }, + } + _, _, err := client.DefaultAPI.PostClusterStatuses(ctx, cluster.ID).AdapterStatusCreateRequest(statusInput).Execute() + Expect(err).NotTo(HaveOccurred()) + } + + // Test 1: Empty dataset pagination (different cluster with no statuses) + emptyCluster, err := h.Factories.NewClusters(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + + emptyList, _, err := client.DefaultAPI.GetClusterStatuses(ctx, emptyCluster.ID).Execute() + Expect(err).NotTo(HaveOccurred()) + Expect(emptyList.Total).To(Equal(int32(0))) + Expect(len(emptyList.Items)).To(Equal(0)) + + // Test 2: Page beyond total pages + beyondList, _, err := client.DefaultAPI.GetClusterStatuses(ctx, cluster.ID).Page(100).PageSize(5).Execute() + Expect(err).NotTo(HaveOccurred()) + Expect(len(beyondList.Items)).To(Equal(0), "Should return empty when page exceeds total pages") + Expect(beyondList.Total).To(Equal(int32(10)), "Total should still reflect actual count") + + // Test 3: Single item dataset + singleCluster, err := h.Factories.NewClusters(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + + singleStatus := openapi.AdapterStatusCreateRequest{ + Adapter: "single-adapter", + ObservedGeneration: singleCluster.Generation, + Conditions: []openapi.Condition{ + { + Type: "Ready", + Status: "True", + }, + }, + } + _, _, err = client.DefaultAPI.PostClusterStatuses(ctx, singleCluster.ID).AdapterStatusCreateRequest(singleStatus).Execute() + Expect(err).NotTo(HaveOccurred()) + + singleList, _, err := client.DefaultAPI.GetClusterStatuses(ctx, singleCluster.ID).Execute() + Expect(err).NotTo(HaveOccurred()) + Expect(singleList.Total).To(Equal(int32(1))) + Expect(len(singleList.Items)).To(Equal(1)) + Expect(singleList.Page).To(Equal(int32(1))) + + // Test 4: Pagination consistency - verify no duplicates and no missing items + allItems := make(map[string]bool) + page := 1 + pageSize := 3 + + for { + list, _, err := client.DefaultAPI.GetClusterStatuses(ctx, cluster.ID).Page(int32(page)).PageSize(int32(pageSize)).Execute() + Expect(err).NotTo(HaveOccurred()) + + if len(list.Items) == 0 { + break + } + + for _, item := range list.Items { + adapter := item.Adapter + Expect(allItems[adapter]).To(BeFalse(), "Duplicate adapter found in pagination: %s", adapter) + allItems[adapter] = true + } + + page++ + if page > 10 { + break // Safety limit + } + } + + // Verify we got all 10 unique adapters + Expect(len(allItems)).To(Equal(10), "Should retrieve all items exactly once across pages") +} diff --git a/test/integration/clusters_test.go b/test/integration/clusters_test.go new file mode 100644 index 0000000..9aa653d --- /dev/null +++ b/test/integration/clusters_test.go @@ -0,0 +1,276 @@ +package integration + +import ( + "context" + "fmt" + "net/http" + "testing" + + . "github.com/onsi/gomega" + "gopkg.in/resty.v1" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/test" +) + +func TestClusterGet(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // 401 using no JWT token + _, _, err := client.DefaultAPI.GetClusterById(context.Background(), "foo").Execute() + Expect(err).To(HaveOccurred(), "Expected 401 but got nil error") + + // GET responses per openapi spec: 200 and 404, + _, resp, err := client.DefaultAPI.GetClusterById(ctx, "foo").Execute() + Expect(err).To(HaveOccurred(), "Expected 404") + Expect(resp.StatusCode).To(Equal(http.StatusNotFound)) + + clusterModel, err := h.Factories.NewClusters(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + + clusterOutput, resp, err := client.DefaultAPI.GetClusterById(ctx, clusterModel.ID).Execute() + Expect(err).NotTo(HaveOccurred()) + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + + Expect(*clusterOutput.Id).To(Equal(clusterModel.ID), "found object does not match test object") + Expect(clusterOutput.Kind).To(Equal("Cluster")) + Expect(*clusterOutput.Href).To(Equal(fmt.Sprintf("/api/hyperfleet/v1/clusters/%s", clusterModel.ID))) + Expect(clusterOutput.CreatedAt).To(BeTemporally("~", clusterModel.CreatedAt)) + Expect(clusterOutput.UpdatedAt).To(BeTemporally("~", clusterModel.UpdatedAt)) +} + +func TestClusterPost(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // POST responses per openapi spec: 201, 409, 500 + clusterInput := openapi.ClusterCreateRequest{ + Kind: "Cluster", + Name: "test-name", + Spec: map[string]interface{}{"test": "spec"}, + } + + // 201 Created + clusterOutput, resp, err := client.DefaultAPI.PostCluster(ctx).ClusterCreateRequest(clusterInput).Execute() + Expect(err).NotTo(HaveOccurred(), "Error posting object: %v", err) + Expect(resp.StatusCode).To(Equal(http.StatusCreated)) + Expect(*clusterOutput.Id).NotTo(BeEmpty(), "Expected ID assigned on creation") + Expect(clusterOutput.Kind).To(Equal("Cluster")) + Expect(*clusterOutput.Href).To(Equal(fmt.Sprintf("/api/hyperfleet/v1/clusters/%s", *clusterOutput.Id))) + + // 400 bad request. posting junk json is one way to trigger 400. + jwtToken := ctx.Value(openapi.ContextAccessToken) + restyResp, err := resty.R(). + SetHeader("Content-Type", "application/json"). + SetHeader("Authorization", fmt.Sprintf("Bearer %s", jwtToken)). + SetBody(`{ this is invalid }`). + Post(h.RestURL("/clusters")) + + Expect(restyResp.StatusCode()).To(Equal(http.StatusBadRequest)) +} + +// TestClusterPatch is disabled because PATCH endpoints are not implemented +// func TestClusterPatch(t *testing.T) { +// // PATCH not implemented in current API +// } + +func TestClusterPaging(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Paging + _, err := h.Factories.NewClustersList("Bronto", 20) + Expect(err).NotTo(HaveOccurred()) + + list, _, err := client.DefaultAPI.GetClusters(ctx).Execute() + Expect(err).NotTo(HaveOccurred(), "Error getting cluster list: %v", err) + Expect(len(list.Items)).To(Equal(20)) + Expect(list.Size).To(Equal(int32(20))) + Expect(list.Total).To(Equal(int32(20))) + Expect(list.Page).To(Equal(int32(1))) + + list, _, err = client.DefaultAPI.GetClusters(ctx).Page(2).PageSize(5).Execute() + Expect(err).NotTo(HaveOccurred(), "Error getting cluster list: %v", err) + Expect(len(list.Items)).To(Equal(5)) + Expect(list.Size).To(Equal(int32(5))) + Expect(list.Total).To(Equal(int32(20))) + Expect(list.Page).To(Equal(int32(2))) +} + +func TestClusterListSearch(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + clusters, err := h.Factories.NewClustersList("bronto", 20) + Expect(err).NotTo(HaveOccurred(), "Error creating test clusters: %v", err) + + search := fmt.Sprintf("id in ('%s')", clusters[0].ID) + list, _, err := client.DefaultAPI.GetClusters(ctx).Search(search).Execute() + Expect(err).NotTo(HaveOccurred(), "Error getting cluster list: %v", err) + Expect(len(list.Items)).To(Equal(1)) + Expect(list.Total).To(Equal(int32(1))) + Expect(*list.Items[0].Id).To(Equal(clusters[0].ID)) +} + +// TestClusterSearchSQLInjection tests SQL injection protection in search +func TestClusterSearchSQLInjection(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Create a few clusters + clusters, err := h.Factories.NewClustersList("injection-test", 5) + Expect(err).NotTo(HaveOccurred()) + + // Test 1: SQL injection attempt with OR + maliciousSearch := "id='anything' OR '1'='1'" + _, _, err = client.DefaultAPI.GetClusters(ctx).Search(maliciousSearch).Execute() + // Should either return 400 error or return empty/controlled results + // Not crash or return all data + if err == nil { + // If no error, the search should not return everything + t.Logf("Search with SQL injection did not error - implementation may handle it gracefully") + } + + // Test 2: SQL injection attempt with DROP + dropSearch := "id='; DROP TABLE clusters; --" + _, _, err = client.DefaultAPI.GetClusters(ctx).Search(dropSearch).Execute() + // Should not crash + if err == nil { + t.Logf("Search with DROP statement did not error - implementation may handle it gracefully") + } + + // Test 3: Verify clusters still exist after injection attempts + list, _, err := client.DefaultAPI.GetClusters(ctx).Execute() + Expect(err).NotTo(HaveOccurred()) + Expect(list.Total).To(BeNumerically(">=", 5), "Clusters should still exist after injection attempts") + + // Test 4: Valid search still works + validSearch := fmt.Sprintf("id='%s'", clusters[0].ID) + validList, _, err := client.DefaultAPI.GetClusters(ctx).Search(validSearch).Execute() + Expect(err).NotTo(HaveOccurred()) + Expect(len(validList.Items)).To(BeNumerically(">=", 0)) +} + +// TestClusterDuplicateNames tests that duplicate cluster names are rejected +func TestClusterDuplicateNames(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Create first cluster with a specific name + clusterInput := openapi.ClusterCreateRequest{ + Kind: "Cluster", + Name: "duplicate-name-test", + Spec: map[string]interface{}{"test": "spec1"}, + } + + cluster1, resp, err := client.DefaultAPI.PostCluster(ctx).ClusterCreateRequest(clusterInput).Execute() + Expect(err).NotTo(HaveOccurred()) + Expect(resp.StatusCode).To(Equal(http.StatusCreated)) + id1 := *cluster1.Id + + // Create second cluster with the SAME name + // Names are unique, so this should return 409 Conflict + _, resp, err = client.DefaultAPI.PostCluster(ctx).ClusterCreateRequest(clusterInput).Execute() + Expect(err).To(HaveOccurred(), "Expected 409 Conflict for duplicate name") + Expect(resp.StatusCode).To(Equal(http.StatusConflict)) + + // Verify first cluster still exists + retrieved1, _, err := client.DefaultAPI.GetClusterById(ctx, id1).Execute() + Expect(err).NotTo(HaveOccurred()) + Expect(retrieved1.Name).To(Equal("duplicate-name-test")) +} + +// TestClusterBoundaryValues tests boundary values for cluster fields +func TestClusterBoundaryValues(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Test 1: Maximum name length (database limit is 63 characters) + longName := "" + for i := 0; i < 63; i++ { + longName += "a" + } + + longNameInput := openapi.ClusterCreateRequest{ + Kind: "Cluster", + Name: longName, + Spec: map[string]interface{}{"test": "spec"}, + } + + longNameCluster, resp, err := client.DefaultAPI.PostCluster(ctx).ClusterCreateRequest(longNameInput).Execute() + Expect(err).NotTo(HaveOccurred(), "Should accept name up to 63 characters") + Expect(resp.StatusCode).To(Equal(http.StatusCreated)) + Expect(longNameCluster.Name).To(Equal(longName)) + + // Test exceeding max length (64 characters should fail) + tooLongName := longName + "a" + tooLongInput := openapi.ClusterCreateRequest{ + Kind: "Cluster", + Name: tooLongName, + Spec: map[string]interface{}{"test": "spec"}, + } + _, resp, err = client.DefaultAPI.PostCluster(ctx).ClusterCreateRequest(tooLongInput).Execute() + Expect(err).To(HaveOccurred(), "Should reject name exceeding 63 characters") + Expect(resp.StatusCode).To(Equal(http.StatusInternalServerError)) + + // Test 2: Empty name + emptyNameInput := openapi.ClusterCreateRequest{ + Kind: "Cluster", + Name: "", + Spec: map[string]interface{}{"test": "spec"}, + } + + _, resp, err = client.DefaultAPI.PostCluster(ctx).ClusterCreateRequest(emptyNameInput).Execute() + // Should either accept empty name or return 400 + if resp != nil { + t.Logf("Empty name test returned status: %d", resp.StatusCode) + } + + // Test 3: Large spec JSON (test with ~10KB JSON) + largeSpec := make(map[string]interface{}) + for i := 0; i < 100; i++ { + largeSpec[fmt.Sprintf("key_%d", i)] = fmt.Sprintf("value_%d_with_some_padding_to_increase_size_xxxxxxxxxxxxxxxxxx", i) + } + + largeSpecInput := openapi.ClusterCreateRequest{ + Kind: "Cluster", + Name: "large-spec-test", + Spec: largeSpec, + } + + largeSpecCluster, resp, err := client.DefaultAPI.PostCluster(ctx).ClusterCreateRequest(largeSpecInput).Execute() + Expect(err).NotTo(HaveOccurred(), "Should accept large spec JSON") + Expect(resp.StatusCode).To(Equal(http.StatusCreated)) + + // Verify the spec was stored correctly + retrieved, _, err := client.DefaultAPI.GetClusterById(ctx, *largeSpecCluster.Id).Execute() + Expect(err).NotTo(HaveOccurred()) + Expect(len(retrieved.Spec)).To(Equal(100)) + + // Test 4: Unicode in name + unicodeNameInput := openapi.ClusterCreateRequest{ + Kind: "Cluster", + Name: "テスト-δοκιμή-🚀", + Spec: map[string]interface{}{"test": "spec"}, + } + + unicodeNameCluster, resp, err := client.DefaultAPI.PostCluster(ctx).ClusterCreateRequest(unicodeNameInput).Execute() + Expect(err).NotTo(HaveOccurred(), "Should accept unicode in name") + Expect(resp.StatusCode).To(Equal(http.StatusCreated)) + Expect(unicodeNameCluster.Name).To(Equal("テスト-δοκιμή-🚀")) +} diff --git a/test/integration/compatibility_test.go b/test/integration/compatibility_test.go new file mode 100644 index 0000000..e8a7f6c --- /dev/null +++ b/test/integration/compatibility_test.go @@ -0,0 +1,34 @@ +package integration + +import ( + "context" + "net/http" + "testing" + + . "github.com/onsi/gomega" + + "github.com/openshift-hyperfleet/hyperfleet-api/test" +) + +// TestCompatibilityGet tests the compatibility endpoint +func TestCompatibilityGet(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // GET /api/hyperfleet/v1/compatibility + result, resp, err := client.DefaultAPI.GetCompatibility(ctx).Execute() + Expect(err).NotTo(HaveOccurred(), "Error getting compatibility: %v", err) + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + Expect(result).NotTo(BeEmpty()) +} + +// TestCompatibilityNoAuth tests that compatibility endpoint requires authentication +func TestCompatibilityNoAuth(t *testing.T) { + _, client := test.RegisterIntegration(t) + + // Try to get compatibility without authentication + _, _, err := client.DefaultAPI.GetCompatibility(context.Background()).Execute() + Expect(err).To(HaveOccurred(), "Expected authentication error") +} diff --git a/test/integration/integration_test.go b/test/integration/integration_test.go new file mode 100755 index 0000000..096a8a3 --- /dev/null +++ b/test/integration/integration_test.go @@ -0,0 +1,21 @@ +package integration + +import ( + "flag" + "os" + "runtime" + "testing" + + "github.com/golang/glog" + + "github.com/openshift-hyperfleet/hyperfleet-api/test" +) + +func TestMain(m *testing.M) { + flag.Parse() + glog.Infof("Starting integration test using go version %s", runtime.Version()) + helper := test.NewHelper(&testing.T{}) + exitCode := m.Run() + helper.Teardown() + os.Exit(exitCode) +} diff --git a/test/integration/metadata_test.go b/test/integration/metadata_test.go new file mode 100755 index 0000000..96779b6 --- /dev/null +++ b/test/integration/metadata_test.go @@ -0,0 +1,65 @@ +/* +Copyright (c) 2018 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "encoding/json" + "fmt" + "net/http" + "testing" + + . "github.com/onsi/gomega" + "gopkg.in/resty.v1" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api" + "github.com/openshift-hyperfleet/hyperfleet-api/test" +) + +func TestMetadataGet(t *testing.T) { + h, _ := test.RegisterIntegration(t) + + // Build the metadata URL (metadata endpoint is at /api/hyperfleet, not /api/hyperfleet/v1) + protocol := "http" + if h.AppConfig.Server.EnableHTTPS { + protocol = "https" + } + metadataURL := fmt.Sprintf("%s://%s/api/hyperfleet", protocol, h.AppConfig.Server.BindAddress) + + // Test GET /api/hyperfleet - metadata endpoint doesn't require authentication + resp, err := resty.R(). + SetHeader("Content-Type", "application/json"). + Get(metadataURL) + + Expect(err).NotTo(HaveOccurred(), "Error getting metadata: %v", err) + Expect(resp.StatusCode()).To(Equal(http.StatusOK)) + + // Parse the response body + var metadata api.Metadata + err = json.Unmarshal(resp.Body(), &metadata) + Expect(err).NotTo(HaveOccurred(), "Error parsing metadata response: %v", err) + // + // Verify content type header + contentType := resp.Header().Get("Content-Type") + Expect(contentType).To(Equal("application/json"), "Expected Content-Type to be application/json") + + // Verify all metadata fields + Expect(metadata.ID).To(Equal("hyperfleet"), "Expected ID to be 'hyperfleet'") + Expect(metadata.Kind).To(Equal("API"), "Expected Kind to be 'API'") + Expect(metadata.HREF).To(Equal("/api/hyperfleet"), "Expected HREF to match the request path") + Expect(metadata.Version).NotTo(BeEmpty(), "Expected Version to be set") + Expect(metadata.BuildTime).NotTo(BeEmpty(), "Expected BuildTime to be set") +} diff --git a/test/integration/node_pools_test.go b/test/integration/node_pools_test.go new file mode 100644 index 0000000..3bb721c --- /dev/null +++ b/test/integration/node_pools_test.go @@ -0,0 +1,204 @@ +package integration + +import ( + "fmt" + "net/http" + "testing" + + . "github.com/onsi/gomega" + "gopkg.in/resty.v1" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" + "github.com/openshift-hyperfleet/hyperfleet-api/test" +) + +// TestNodePoolGet is disabled because GET /nodepools/{id} is not in the OpenAPI spec +// The API only supports: +// - GET /api/hyperfleet/v1/nodepools (list all nodepools) +// - GET /api/hyperfleet/v1/clusters/{cluster_id}/nodepools (list nodepools by cluster) +// - POST /api/hyperfleet/v1/clusters/{cluster_id}/nodepools (create nodepool) +// func TestNodePoolGet(t *testing.T) { +// h, client := test.RegisterIntegration(t) +// +// account := h.NewRandAccount() +// ctx := h.NewAuthenticatedContext(account) +// +// // 401 using no JWT token +// _, _, err := client.DefaultAPI.GetNodePoolById(context.Background(), "foo").Execute() +// Expect(err).To(HaveOccurred(), "Expected 401 but got nil error") +// +// // GET responses per openapi spec: 200 and 404, +// _, resp, err := client.DefaultAPI.GetNodePoolById(ctx, "foo").Execute() +// Expect(err).To(HaveOccurred(), "Expected 404") +// Expect(resp.StatusCode).To(Equal(http.StatusNotFound)) +// +// nodePoolModel, err := h.Factories.NewNodePools(h.NewID()) +// Expect(err).NotTo(HaveOccurred()) +// +// nodePoolOutput, resp, err := client.DefaultAPI.GetNodePoolById(ctx, nodePoolModel.ID).Execute() +// Expect(err).NotTo(HaveOccurred()) +// Expect(resp.StatusCode).To(Equal(http.StatusOK)) +// +// Expect(*nodePoolOutput.Id).To(Equal(nodePoolModel.ID), "found object does not match test object") +// Expect(*nodePoolOutput.Kind).To(Equal("NodePool")) +// Expect(*nodePoolOutput.Href).To(Equal(fmt.Sprintf("/api/hyperfleet/v1/node_pools/%s", nodePoolModel.ID))) +// Expect(nodePoolOutput.CreatedAt).To(BeTemporally("~", nodePoolModel.CreatedAt)) +// Expect(nodePoolOutput.UpdatedAt).To(BeTemporally("~", nodePoolModel.UpdatedAt)) +// } + +func TestNodePoolPost(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Create a parent cluster first + cluster, err := h.Factories.NewClusters(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + + // POST responses per openapi spec: 201, 409, 500 + nodePoolInput := openapi.NodePoolCreateRequest{ + Name: "test-name", + Spec: map[string]interface{}{"test": "spec"}, + } + + // 201 Created + nodePoolOutput, resp, err := client.DefaultAPI.CreateNodePool(ctx, cluster.ID).NodePoolCreateRequest(nodePoolInput).Execute() + Expect(err).NotTo(HaveOccurred(), "Error posting object: %v", err) + Expect(resp.StatusCode).To(Equal(http.StatusCreated)) + Expect(*nodePoolOutput.Id).NotTo(BeEmpty(), "Expected ID assigned on creation") + Expect(*nodePoolOutput.Kind).To(Equal("NodePool")) + Expect(*nodePoolOutput.Href).To(Equal(fmt.Sprintf("/api/hyperfleet/v1/clusters/%s/nodepools/%s", cluster.ID, *nodePoolOutput.Id))) + + // 400 bad request. posting junk json is one way to trigger 400. + jwtToken := ctx.Value(openapi.ContextAccessToken) + restyResp, err := resty.R(). + SetHeader("Content-Type", "application/json"). + SetHeader("Authorization", fmt.Sprintf("Bearer %s", jwtToken)). + SetBody(`{ this is invalid }`). + Post(h.RestURL(fmt.Sprintf("/clusters/%s/nodepools", cluster.ID))) + + Expect(restyResp.StatusCode()).To(Equal(http.StatusBadRequest)) +} + +// TestNodePoolPatch is disabled because PATCH endpoints are not implemented +// func TestNodePoolPatch(t *testing.T) { +// // PATCH not implemented in current API +// } + +func TestNodePoolPaging(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Paging + _, err := h.Factories.NewNodePoolsList("Bronto", 20) + Expect(err).NotTo(HaveOccurred()) + + list, _, err := client.DefaultAPI.GetNodePools(ctx).Execute() + Expect(err).NotTo(HaveOccurred(), "Error getting nodePool list: %v", err) + Expect(len(list.Items)).To(Equal(20)) + Expect(list.Size).To(Equal(int32(20))) + Expect(list.Total).To(Equal(int32(20))) + Expect(list.Page).To(Equal(int32(1))) + + list, _, err = client.DefaultAPI.GetNodePools(ctx).Page(2).PageSize(5).Execute() + Expect(err).NotTo(HaveOccurred(), "Error getting nodePool list: %v", err) + Expect(len(list.Items)).To(Equal(5)) + Expect(list.Size).To(Equal(int32(5))) + Expect(list.Total).To(Equal(int32(20))) + Expect(list.Page).To(Equal(int32(2))) +} + +func TestNodePoolListSearch(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + nodePools, err := h.Factories.NewNodePoolsList("bronto", 20) + Expect(err).NotTo(HaveOccurred(), "Error creating test nodepools: %v", err) + + search := fmt.Sprintf("id in ('%s')", nodePools[0].ID) + list, _, err := client.DefaultAPI.GetNodePools(ctx).Search(search).Execute() + Expect(err).NotTo(HaveOccurred(), "Error getting nodePool list: %v", err) + Expect(len(list.Items)).To(Equal(1)) + Expect(list.Total).To(Equal(int32(1))) + Expect(*list.Items[0].Id).To(Equal(nodePools[0].ID)) +} + +func TestNodePoolsByClusterId(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Create a cluster first + cluster, err := h.Factories.NewClusters(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + + // Create nodepools for this cluster + // Note: In a real implementation, nodepools would be associated with the cluster + // For now, we're just creating nodepools and testing the endpoint exists + _, err = h.Factories.NewNodePoolsList("cluster-nodepools", 5) + Expect(err).NotTo(HaveOccurred()) + + // Get nodepools by cluster ID + list, resp, err := client.DefaultAPI.GetNodePoolsByClusterId(ctx, cluster.ID).Execute() + Expect(err).NotTo(HaveOccurred(), "Error getting nodepools by cluster ID: %v", err) + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + Expect(list).NotTo(BeNil()) + // The list might be empty if nodepools aren't properly associated with the cluster + // but the endpoint should work +} + +func TestGetNodePoolByClusterIdAndNodePoolId(t *testing.T) { + h, client := test.RegisterIntegration(t) + + account := h.NewRandAccount() + ctx := h.NewAuthenticatedContext(account) + + // Create a cluster first + cluster, err := h.Factories.NewClusters(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + + // Create a nodepool for this cluster using the API + nodePoolInput := openapi.NodePoolCreateRequest{ + Name: "test-nodepool-get", + Spec: map[string]interface{}{"instance_type": "m5.large", "replicas": 2}, + } + + nodePoolOutput, resp, err := client.DefaultAPI.CreateNodePool(ctx, cluster.ID).NodePoolCreateRequest(nodePoolInput).Execute() + Expect(err).NotTo(HaveOccurred(), "Error creating nodepool: %v", err) + Expect(resp.StatusCode).To(Equal(http.StatusCreated)) + Expect(*nodePoolOutput.Id).NotTo(BeEmpty()) + + nodePoolID := *nodePoolOutput.Id + + // Test 1: Get the nodepool by cluster ID and nodepool ID (200 OK) + retrieved, resp, err := client.DefaultAPI.GetNodePoolById(ctx, cluster.ID, nodePoolID).Execute() + Expect(err).NotTo(HaveOccurred(), "Error getting nodepool by cluster and nodepool ID: %v", err) + Expect(resp.StatusCode).To(Equal(http.StatusOK)) + Expect(*retrieved.Id).To(Equal(nodePoolID), "Retrieved nodepool ID should match") + Expect(*retrieved.Kind).To(Equal("NodePool")) + Expect(retrieved.Name).To(Equal("test-nodepool-get")) + + // Test 2: Try to get with non-existent nodepool ID (404) + _, resp, err = client.DefaultAPI.GetNodePoolById(ctx, cluster.ID, "non-existent-id").Execute() + Expect(err).To(HaveOccurred(), "Expected 404 for non-existent nodepool") + Expect(resp.StatusCode).To(Equal(http.StatusNotFound)) + + // Test 3: Try to get with non-existent cluster ID (404) + _, resp, err = client.DefaultAPI.GetNodePoolById(ctx, "non-existent-cluster", nodePoolID).Execute() + Expect(err).To(HaveOccurred(), "Expected 404 for non-existent cluster") + Expect(resp.StatusCode).To(Equal(http.StatusNotFound)) + + // Test 4: Create another cluster and verify that nodepool is not accessible from wrong cluster + cluster2, err := h.Factories.NewClusters(h.NewID()) + Expect(err).NotTo(HaveOccurred()) + + _, resp, err = client.DefaultAPI.GetNodePoolById(ctx, cluster2.ID, nodePoolID).Execute() + Expect(err).To(HaveOccurred(), "Expected 404 when accessing nodepool from wrong cluster") + Expect(resp.StatusCode).To(Equal(http.StatusNotFound)) +} diff --git a/test/integration/openapi_test.go b/test/integration/openapi_test.go new file mode 100755 index 0000000..c2cf740 --- /dev/null +++ b/test/integration/openapi_test.go @@ -0,0 +1,100 @@ +/* +Copyright (c) 2018 Red Hat, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package integration + +import ( + "encoding/json" + "fmt" + "net/http" + "testing" + + . "github.com/onsi/gomega" + "gopkg.in/resty.v1" + + "github.com/openshift-hyperfleet/hyperfleet-api/test" +) + +func TestOpenAPIGet(t *testing.T) { + h, _ := test.RegisterIntegration(t) + + protocol := "http" + if h.AppConfig.Server.EnableHTTPS { + protocol = "https" + } + openAPIURL := fmt.Sprintf("%s://%s/api/hyperfleet/v1/openapi", protocol, h.AppConfig.Server.BindAddress) + + resp, err := resty.R(). + SetHeader("Content-Type", "application/json"). + Get(openAPIURL) + + Expect(err).NotTo(HaveOccurred(), "Error getting OpenAPI spec: %v", err) + Expect(resp.StatusCode()).To(Equal(http.StatusOK), "Expected status code 200") + + // Verify content type header + contentType := resp.Header().Get("Content-Type") + Expect(contentType).To(Equal("application/json"), "Expected Content-Type to be application/json") + + // Verify the response body is valid JSON + var openAPISpec map[string]interface{} + err = json.Unmarshal(resp.Body(), &openAPISpec) + Expect(err).NotTo(HaveOccurred(), "Error parsing OpenAPI JSON response: %v", err) + + // Verify the OpenAPI spec has required fields + Expect(openAPISpec).To(HaveKey("openapi"), "Expected OpenAPI spec to have 'openapi' field") + Expect(openAPISpec).To(HaveKey("info"), "Expected OpenAPI spec to have 'info' field") + Expect(openAPISpec).To(HaveKey("paths"), "Expected OpenAPI spec to have 'paths' field") + + // Verify the OpenAPI version + Expect(openAPISpec["openapi"]).To(Equal("3.0.0"), "Expected OpenAPI version to be 3.0.0") + + // Verify info section + info, ok := openAPISpec["info"].(map[string]interface{}) + Expect(ok).To(BeTrue(), "Expected 'info' to be an object") + Expect(info).To(HaveKey("title"), "Expected 'info' to have 'title' field") + Expect(info).To(HaveKey("version"), "Expected 'info' to have 'version' field") +} + +func TestOpenAPIUIGet(t *testing.T) { + h, _ := test.RegisterIntegration(t) + + protocol := "http" + if h.AppConfig.Server.EnableHTTPS { + protocol = "https" + } + openAPIUIURL := fmt.Sprintf("%s://%s/api/hyperfleet/v1/openapi.html", protocol, h.AppConfig.Server.BindAddress) + + t.Logf("OpenAPI UI URL: %s", openAPIUIURL) + + resp, err := resty.R(). + SetHeader("Content-Type", "text/html"). + Get(openAPIUIURL) + + Expect(err).NotTo(HaveOccurred(), "Error getting OpenAPI UI: %v", err) + Expect(resp.StatusCode()).To(Equal(http.StatusOK), "Expected status code 200") + + // Verify content type header + contentType := resp.Header().Get("Content-Type") + Expect(contentType).To(Equal("text/html"), "Expected Content-Type to be text/html") + + // Verify the response body is not empty + body := resp.String() + Expect(body).NotTo(BeEmpty(), "Expected OpenAPI UI HTML to not be empty") + + // Verify the HTML contains expected elements (basic checks) + Expect(body).To(ContainSubstring(""), "Expected HTML to contain '' tag") +} diff --git a/test/mocks/jwk_cert_server.go b/test/mocks/jwk_cert_server.go new file mode 100755 index 0000000..fecfef6 --- /dev/null +++ b/test/mocks/jwk_cert_server.go @@ -0,0 +1,46 @@ +package mocks + +import ( + "crypto" + "fmt" + "net/http" + "net/http/httptest" + "testing" + + "github.com/mendsley/gojwk" +) + +const ( + certEndpoint = "/auth/realms/rhd/protocol/openid-connect/certs" +) + +func NewJWKCertServerMock(t *testing.T, pubKey crypto.PublicKey, jwkKID string, jwkAlg string) (url string, teardown func() error) { + certHandler := http.NewServeMux() + certHandler.HandleFunc(certEndpoint, + func(w http.ResponseWriter, r *http.Request) { + pubjwk, err := gojwk.PublicKey(pubKey) + if err != nil { + t.Errorf("Unable to generate public jwk: %s", err) + return + } + pubjwk.Kid = jwkKID + pubjwk.Alg = jwkAlg + jwkBytes, err := gojwk.Marshal(pubjwk) + if err != nil { + t.Errorf("Unable to marshal public jwk: %s", err) + return + } + fmt.Fprintf(w, `{"keys":[%s]}`, string(jwkBytes)) + }, + ) + + server := httptest.NewServer(certHandler) + return fmt.Sprintf("%s%s", server.URL, certEndpoint), serverClose(server) +} + +func serverClose(server *httptest.Server) func() error { + return func() error { + server.Close() + return nil + } +} diff --git a/test/mocks/mocks.go b/test/mocks/mocks.go new file mode 100755 index 0000000..4b64881 --- /dev/null +++ b/test/mocks/mocks.go @@ -0,0 +1,19 @@ +package mocks + +import ( + "net/http" + "net/http/httptest" + "time" +) + +// NewMockServerTimeout Returns a server that will wait waitTime when hit at endpoint +func NewMockServerTimeout(endpoint string, waitTime time.Duration) (*httptest.Server, func()) { + apiHandler := http.NewServeMux() + apiHandler.HandleFunc(endpoint, + func(w http.ResponseWriter, r *http.Request) { + time.Sleep(waitTime) + }, + ) + server := httptest.NewServer(apiHandler) + return server, server.Close +} diff --git a/test/mocks/ocm.go b/test/mocks/ocm.go new file mode 100755 index 0000000..cec7d6b --- /dev/null +++ b/test/mocks/ocm.go @@ -0,0 +1,65 @@ +package mocks + +import ( + "context" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/client/ocm" +) + +/* +The OCM Validator Mock will simply return true to all access_review requests instead +of reaching out to the AMS system or using the built-in OCM mock. It will record +the action and resourceType sent to it in the struct itself. This can be used +to validate that the expected action/resourceType for a particular endpoint was +determined in the authorization middleware + +Use: + h, client := test.RegisterIntegration(t) + authzMock, ocmMock := mocks.NewOCMAuthzValidatorMockClient() + // Use the OCM client mock, re-load services so they pick up the mock + h.Env().Clients.OCM = ocmMock + // The built-in mock has to be disabled or the server will use it instead + h.Env().Config.OCM.EnableMock = false + // Services and the server should be re-loaded to pick up the client with this mock + h.Env().LoadServices() + h.RestartServer() + + // Make a request, then validate the action and resourceType + Expect(authzMock.Action).To(Equal("get")) + Expect(authzMock.ResourceType).To(Equal("JQSJobQueue")) + authzMock.Reset() +*/ + +var _ ocm.Authorization = &OCMAuthzValidatorMock{} + +type OCMAuthzValidatorMock struct { + Action string + ResourceType string +} + +func NewOCMAuthzValidatorMockClient() (*OCMAuthzValidatorMock, *ocm.Client) { + authz := &OCMAuthzValidatorMock{ + Action: "", + ResourceType: "", + } + client := &ocm.Client{} + client.Authorization = authz + return authz, client +} + +func (m *OCMAuthzValidatorMock) SelfAccessReview(ctx context.Context, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) { + m.Action = action + m.ResourceType = resourceType + return true, nil +} + +func (m *OCMAuthzValidatorMock) AccessReview(ctx context.Context, username, action, resourceType, organizationID, subscriptionID, clusterID string) (allowed bool, err error) { + m.Action = action + m.ResourceType = resourceType + return true, nil +} + +func (m OCMAuthzValidatorMock) Reset() { + m.Action = "" + m.ResourceType = "" +} diff --git a/test/registration.go b/test/registration.go new file mode 100755 index 0000000..defd794 --- /dev/null +++ b/test/registration.go @@ -0,0 +1,24 @@ +package test + +import ( + "testing" + + gm "github.com/onsi/gomega" + + "github.com/openshift-hyperfleet/hyperfleet-api/pkg/api/openapi" +) + +// RegisterIntegration Register a test +// This should be run before every integration test +func RegisterIntegration(t *testing.T) (*Helper, *openapi.APIClient) { + // Register the test with gomega + gm.RegisterTestingT(t) + // Create a new helper + helper := NewHelper(t) + // Reset the database to a seeded blank state + helper.DBFactory.ResetDB() + // Create an api client + client := helper.NewApiClient() + + return helper, client +} diff --git a/test/support/certs.json b/test/support/certs.json new file mode 100755 index 0000000..71cb126 --- /dev/null +++ b/test/support/certs.json @@ -0,0 +1,12 @@ +{ + "keys": [ + { + "kid": "HjYaVHwyM77lw0mv7ko-qC7tKri03jqSukNea0SWY7M", + "kty": "RSA", + "alg": "RS256", + "use": "sig", + "n": "q6DF0dZFJnnVVIUtyaVV9Hial9hsSRXtH8Z01kOoAdGwQLqFjKDzNeliOL9KL0i-D71Bo9vKp13Qo8r9UjjNPGV6HzxgXR95MIZP4nqWo9Qp_9SHOjxMSqg-ZFf45p0pSKRdgKTfzu0eJ1CpZt4BdYM9wM3iuOgon09hIMKcO0AU7xqX0KmCg-ToIgVDCaGtXqcC0qv3fr7acTUBoVd8sWNaIOKXiL90cR7oZX_wLoApF2cQyrgTozaMrdEe3RuvwU8hE_r3kYTUYsxTv0liJ8FRfuO5FJuEGVpYc7QDyIztt9YOqowQgHq_2IhqcWhULtzGIXh26voAgWfA2BGAFw", + "e": "AQAB" + } + ] +} diff --git a/test/support/jwt_ca.pem b/test/support/jwt_ca.pem new file mode 100755 index 0000000..458bcc5 --- /dev/null +++ b/test/support/jwt_ca.pem @@ -0,0 +1,19 @@ +-----BEGIN CERTIFICATE----- +MIIC/zCCAeegAwIBAgIBATANBgkqhkiG9w0BAQUFADAaMQswCQYDVQQGEwJVUzEL +MAkGA1UECgwCWjQwHhcNMTMwODI4MTgyODM0WhcNMjMwODI4MTgyODM0WjAaMQsw +CQYDVQQGEwJVUzELMAkGA1UECgwCWjQwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAw +ggEKAoIBAQDfdOqotHd55SYO0dLz2oXengw/tZ+q3ZmOPeVmMuOMIYO/Cv1wk2U0 +OK4pug4OBSJPhl09Zs6IwB8NwPOU7EDTgMOcQUYB/6QNCI1J7Zm2oLtuchzz4pIb ++o4ZAhVprLhRyvqi8OTKQ7kfGfs5Tuwmn1M/0fQkfzMxADpjOKNgf0uy6lN6utjd +TrPKKFUQNdc6/Ty8EeTnQEwUlsT2LAXCfEKxTn5RlRljDztS7Sfgs8VL0FPy1Qi8 +B+dFcgRYKFrcpsVaZ1lBmXKsXDRu5QR/Rg3f9DRq4GR1sNH8RLY9uApMl2SNz+sR +4zRPG85R/se5Q06Gu0BUQ3UPm67ETVZLAgMBAAGjUDBOMB0GA1UdDgQWBBQHZPTE +yQVu/0I/3QWhlTyW7WoTzTAfBgNVHSMEGDAWgBQHZPTEyQVu/0I/3QWhlTyW7WoT +zTAMBgNVHRMEBTADAQH/MA0GCSqGSIb3DQEBBQUAA4IBAQDHxqJ9y8alTH7agVMW +Zfic/RbrdvHwyq+IOrgDToqyo0w+IZ6BCn9vjv5iuhqu4ForOWDAFpQKZW0DLBJE +Qy/7/0+9pk2DPhK1XzdOovlSrkRt+GcEpGnUXnzACXDBbO0+Wrk+hcjEkQRRK1bW +2rknARIEJG9GS+pShP9Bq/0BmNsMepdNcBa0z3a5B0fzFyCQoUlX6RTqxRw1h1Qt +5F00pfsp7SjXVIvYcewHaNASbto1n5hrSz1VY9hLba11ivL1N4WoWbmzAL6BWabs +C2D/MenST2/X6hTKyGXpg3Eg2h3iLvUtwcNny0hRKstc73Jl9xR3qXfXKJH0ThTl +q0gq +-----END CERTIFICATE-----